All downloads are free. The search and download functionality uses the official Maven repository.

org.apache.hudi.org.apache.hadoop.hbase.generated.master.hbck_jsp Maven / Gradle / Ivy

There is a newer version: 1.0.0-beta1
Show newest version
package org.apache.hadoop.hbase.generated.master;

import javax.servlet.*;
import javax.servlet.http.*;
import javax.servlet.jsp.*;
import java.time.Instant;
import java.time.ZoneId;
import java.util.Date;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import java.time.ZonedDateTime;
import java.time.format.DateTimeFormatter;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.master.HbckChore;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.ServerManager;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.master.janitor.CatalogJanitor;
import org.apache.hadoop.hbase.master.janitor.Report;

public final class hbck_jsp extends org.apache.jasper.runtime.HttpJspBase
    implements org.apache.jasper.runtime.JspSourceDependent {


/**
 * Format serverName for display.
 * If a live server reference, make it a link.
 * If dead, make it italic.
 * If unknown, make it plain.
 */
private static String formatServerName(HMaster master,
   ServerManager serverManager, ServerName serverName) {
  String sn = serverName.toString();
  if (serverManager.isServerOnline(serverName)) {
    int infoPort = master.getRegionServerInfoPort(serverName);
    if (infoPort > 0) {
      // Live server with a known info port: link to its web UI.
      // NOTE(review): the original markup here was stripped during extraction,
      // leaving identical no-op "" + sn + "" returns in every branch; restored
      // to match the documented contract above — confirm against upstream hbck.jsp.
      return "<a href=\"//" + serverName.getHostname() + ":" + infoPort + "/\">" + sn + "</a>";
    } else {
      // Live, but no info port is known, so there is nothing to link to.
      return sn;
    }
  } else if (serverManager.isServerDead(serverName)) {
    // Dead server: render in italics per the contract above.
    return "<i>" + sn + "</i>";
  }
  // Unknown server: plain text.
  return sn;
}

  // Jasper runtime factory used to acquire and release the PageContext in _jspService.
  private static final JspFactory _jspxFactory = JspFactory.getDefaultFactory();

  // Source dependencies of the translated JSP as recorded by the Jasper translator.
  // Raw List is generator-emitted style; may be null when there are no dependants.
  private static java.util.List _jspx_dependants;

  // GlassFish resource injector; looked up from the ServletContext attribute
  // "com.sun.appserv.jsp.resource.injector" at service time (see _jspService).
  private org.glassfish.jsp.api.ResourceInjector _jspx_resourceInjector;

  /**
   * Returns the list of source files this generated servlet depends on,
   * per the JspSourceDependent contract. May return null.
   */
  public java.util.List getDependants() {
    return _jspx_dependants;
  }

  /**
   * Jasper-generated service method for the Master "HBCK Report" page.
   * Unless the request carries ?cache=true, it triggers a fresh HbckChore run and
   * a CatalogJanitor scan inline, then renders the cached results of both reports.
   *
   * NOTE(review): the HTML markup inside the out.write(...) string literals below
   * was stripped by whatever extracted this file — many literals are broken across
   * lines and the method no longer compiles as-is. The surviving Java scriptlet
   * logic is annotated here; the mangled emission region is left byte-identical.
   */
  public void _jspService(HttpServletRequest request, HttpServletResponse response)
        throws java.io.IOException, ServletException {

    // Standard Jasper implicit-object slots, populated from the PageContext below.
    PageContext pageContext = null;
    HttpSession session = null;
    ServletContext application = null;
    ServletConfig config = null;
    JspWriter out = null;
    Object page = this;
    JspWriter _jspx_out = null;
    PageContext _jspx_page_context = null;

    try {
      response.setContentType("text/html;charset=UTF-8");
      pageContext = _jspxFactory.getPageContext(this, request, response,
      			null, true, 8192, true);
      _jspx_page_context = pageContext;
      application = pageContext.getServletContext();
      config = pageContext.getServletConfig();
      session = pageContext.getSession();
      out = pageContext.getOut();
      _jspx_out = out;
      _jspx_resourceInjector = (org.glassfish.jsp.api.ResourceInjector) application.getAttribute("com.sun.appserv.jsp.resource.injector");

      out.write("\n\n\n\n\n\n\n\n\n\n\n\n");

  // --- Page scriptlet: gather report data before rendering. ---
  // ?cache=true means "serve the cached reports"; anything else re-runs both chores.
  final String cacheParameterValue = request.getParameter("cache");
  final HMaster master = (HMaster) getServletContext().getAttribute(HMaster.MASTER);
  pageContext.setAttribute("pageTitle", "HBase Master HBCK Report: " + master.getServerName());
  if (!Boolean.parseBoolean(cacheParameterValue)) {
    // Run the two reporters inline w/ drawing of the page. If exception, will show in page draw.
    try {
      master.getMasterRpcServices().runHbckChore(null, null);
    } catch (org.apache.hbase.thirdparty.com.google.protobuf.ServiceException se) {
      out.write("Failed generating a new hbck_chore report; using cache; try again or run hbck_chore_run in the shell: " + se.getMessage() + "\n");
    } 
    try {
      master.getMasterRpcServices().runCatalogScan(null, null);
    } catch (org.apache.hbase.thirdparty.com.google.protobuf.ServiceException se) {
      out.write("Failed generating a new catalogjanitor report; using cache; try again or run catalogjanitor_run in the shell: " + se.getMessage() + "\n");
    } 
  }
  // Pull the (possibly just-refreshed) HbckChore results; all stay null/0 when
  // the chore is absent so the page can render "no report" states.
  // NOTE(review): generic type parameters on these Maps were stripped with the HTML.
  HbckChore hbckChore = master.getHbckChore();
  Map>> inconsistentRegions = null;
  Map orphanRegionsOnRS = null;
  Map orphanRegionsOnFS = null;
  long startTimestamp = 0;
  long endTimestamp = 0;
  if (hbckChore != null) {
    inconsistentRegions = hbckChore.getInconsistentRegions();
    orphanRegionsOnRS = hbckChore.getOrphanRegionsOnRS();
    orphanRegionsOnFS = hbckChore.getOrphanRegionsOnFS();
    startTimestamp = hbckChore.getCheckingStartTimestamp();
    endTimestamp = hbckChore.getCheckingEndTimestamp();
  }
  // Format the chore's start/end timestamps as ISO-8601 in the server's zone;
  // "-1" stands in for "never ran" (startTimestamp == 0).
  // NOTE(review): iso8601end also tests startTimestamp (not endTimestamp) == 0 —
  // looks like an upstream quirk; confirm before relying on it.
  ZonedDateTime zdt = ZonedDateTime.ofInstant(Instant.ofEpochMilli(startTimestamp),
    ZoneId.systemDefault());
  String iso8601start = startTimestamp == 0? "-1": zdt.format(DateTimeFormatter.ISO_OFFSET_DATE_TIME);
  zdt = ZonedDateTime.ofInstant(Instant.ofEpochMilli(endTimestamp),
    ZoneId.systemDefault());
  String iso8601end = startTimestamp == 0? "-1": zdt.format(DateTimeFormatter.ISO_OFFSET_DATE_TIME);
  // Last CatalogJanitor consistency report (null when the janitor is absent or never ran).
  CatalogJanitor cj = master.getCatalogJanitor();
  Report report = cj == null? null: cj.getLastReport();
  final ServerManager serverManager = master.getServerManager();

      // --- Rendering: header include, then the report tables. From here down the
      // emitted markup was stripped from the string literals; left untouched. ---
      out.write('\n');
      org.apache.jasper.runtime.JspRuntimeLibrary.include(request, response, "header.jsp" + "?" + org.apache.jasper.runtime.JspRuntimeLibrary.URLEncode("pageTitle", request.getCharacterEncoding())+ "=" + org.apache.jasper.runtime.JspRuntimeLibrary.URLEncode((java.lang.String) org.apache.jasper.runtime.PageContextImpl.evaluateExpression("${pageTitle}", java.lang.String.class, (PageContext)_jspx_page_context, null), request.getCharacterEncoding()), out, false);
      out.write("\n\n
 \n "); if (!master.isInitialized()) { out.write("\n
 \n
 \n


Master is not initialized

 \n
 \n
 \n "); org.apache.jasper.runtime.JspRuntimeLibrary.include(request, response, "redirect.jsp", out, false); out.write("\n "); } else { out.write("\n\n
 \n
 \n


This page displays two reports: the HBCK Chore Report and\n the CatalogJanitor Consistency Issues report. Only report titles\n show if there are no problems to list. Note some conditions are\n transitory as regions migrate. Reports are generated\n when you invoke this page unless you add ?cache=true to the URL. Then\n we display the reports cached from the last time the reports were run.\n Reports are run by Chores that are hosted by the Master on a cadence.\n You can also run them on demand from the hbase shell: invoke catalogjanitor_run\n and/or hbck_chore_run. \n ServerNames will be links if server is live, italic if dead, and plain if unknown.

 \n
 \n
 \n
 \n
 \n


HBCK Chore Report

 \n


 \n "); if (hbckChore.isDisabled()) { out.write("\n HBCK chore is currently disabled. Set hbase.master.hbck.chore.interval > 0 in the config & do a rolling-restart to enable it.\n "); } else if (startTimestamp == 0 && endTimestamp == 0){ out.write("\n No report created.\n "); } else if (startTimestamp > 0 && endTimestamp == 0){ out.write("\n Checking started at "); out.print( iso8601start ); out.write(". Please wait for checking to generate a new sub-report.\n "); } else { out.write("\n Checking started at "); out.print( iso8601start ); out.write(" and generated report at "); out.print( iso8601end ); out.write(".\n "); } out.write("\n

 \n
 \n
 \n\n "); if (inconsistentRegions != null && inconsistentRegions.size() > 0) { out.write("\n
 \n
 \n


Inconsistent Regions

 \n
 \n
 \n


 \n \n There are three cases: 1. Master thought this region opened, but no regionserver reported it (Fix: use assign\n command); 2. Master thought this region opened on Server1, but regionserver reported Server2 (Fix:\n need to check the server still exists. If not, schedule ServerCrashProcedure for it. If exists,\n restart Server2 and Server1):\n 3. More than one regionserver reports opened this region (Fix: restart the RegionServers).\n Note: the reported online regionservers may be not be up-to-date when there are regions in transition.\n \n

 \n\n \n \n \n \n \n \n "); for (Map.Entry>> entry : inconsistentRegions.entrySet()) { out.write("\n \n \n \n \n \n "); } out.write("\n

"); out.print( inconsistentRegions.size() ); out.write(" region(s) in set.

 \n
Region NameLocation in METAReported Online RegionServers
"); out.print( entry.getKey() ); out.write(""); out.print( formatServerName(master, serverManager, entry.getValue().getFirst()) ); out.write(""); out.print( entry.getValue().getSecond().stream().map(s -> formatServerName(master, serverManager, s)). collect(Collectors.joining(", ")) ); out.write("
 \n "); } out.write("\n\n "); if (orphanRegionsOnRS != null && orphanRegionsOnRS.size() > 0) { out.write("\n
 \n
 \n


Orphan Regions on RegionServer

 \n
 \n
 \n\n \n \n \n \n \n "); for (Map.Entry entry : orphanRegionsOnRS.entrySet()) { out.write("\n \n \n \n \n "); } out.write("\n

"); out.print( orphanRegionsOnRS.size() ); out.write(" region(s) in set.

 \n
Region NameReported Online RegionServer
"); out.print( entry.getKey() ); out.write(""); out.print( formatServerName(master, serverManager, entry.getValue()) ); out.write("
 \n "); } out.write("\n\n "); if (orphanRegionsOnFS != null && orphanRegionsOnFS.size() > 0) { out.write("\n
 \n
 \n


Orphan Regions on FileSystem

 \n
 \n
 \n


 \n \n The below are Regions we've lost account of. To be safe, run bulk load of any data found under these Region orphan directories to have the\n cluster re-adopt data.\n First make sure hbase:meta is in a healthy state, that there are no holes, overlaps or inconsistencies (else bulk load may fail);\n run hbck2 fixMeta. Once this is done, per Region below, run a bulk\n load -- $ hbase completebulkload REGION_DIR_PATH TABLE_NAME -- and then delete the desiccated directory content (HFiles are removed upon\n successful load; all that is left are empty directories and occasionally a seqid marking file).\n \n

 \n \n \n \n \n \n "); for (Map.Entry entry : orphanRegionsOnFS.entrySet()) { out.write("\n \n \n \n \n "); } out.write("\n\n

"); out.print( orphanRegionsOnFS.size() ); out.write(" region(s) in set.

 \n
Region Encoded NameFileSystem Path
"); out.print( entry.getKey() ); out.write(""); out.print( entry.getValue() ); out.write("
 \n "); } out.write("\n\n "); zdt = ZonedDateTime.ofInstant(Instant.ofEpochMilli(System.currentTimeMillis()), ZoneId.systemDefault()); String iso8601Now = zdt.format(DateTimeFormatter.ISO_OFFSET_DATE_TIME); String iso8601reportTime = "-1"; if (report != null) { zdt = ZonedDateTime.ofInstant(Instant.ofEpochMilli(report.getCreateTime()), ZoneId.systemDefault()); iso8601reportTime = zdt.format(DateTimeFormatter.ISO_OFFSET_DATE_TIME); } out.write("\n
 \n
 \n


CatalogJanitor hbase:meta Consistency Issues

 \n

 \n "); if (report != null) { out.write("\n Report created: "); out.print( iso8601reportTime ); out.write(" (now="); out.print( iso8601Now ); out.write(").

 \n "); } else { out.write("\n No report created.\n "); } out.write("\n
 \n
 \n "); if (report != null && !report.isEmpty()) { out.write("\n "); if (!report.getHoles().isEmpty()) { out.write("\n
 \n
 \n


Holes

 \n
 \n
 \n \n \n \n \n \n "); for (Pair p : report.getHoles()) { out.write("\n \n \n \n \n "); } out.write("\n\n

"); out.print( report.getHoles().size() ); out.write(" hole(s).

 \n
RegionInfoRegionInfo
'); out.print( p.getFirst().getRegionNameAsString() ); out.write("'); out.print( p.getSecond().getRegionNameAsString() ); out.write("
 \n "); } out.write("\n "); if (!report.getOverlaps().isEmpty()) { out.write("\n
 \n
 \n


Overlaps

 \n

 \n \n Regions highlighted in blue are recently merged regions, HBase is still doing cleanup for them. Overlaps involving these regions cannot be fixed by hbck2 fixMeta at this moment.\n Please wait some time, run catalogjanitor_run in hbase shell, refresh ‘HBCK Report’ page, make sure these regions are not highlighted to start the fix.\n \n

 \n
 \n
 \n \n \n \n \n \n "); for (Pair p : report.getOverlaps()) { out.write("\n \n "); if (report.getMergedRegions().containsKey(p.getFirst())) { out.write("\n \n "); } else { out.write("\n \n "); } out.write("\n "); if (report.getMergedRegions().containsKey(p.getSecond())) { out.write("\n \n "); } else { out.write("\n \n "); } out.write("\n \n "); } out.write("\n\n

"); out.print( report.getOverlaps().size() ); out.write(" overlap(s).

 \n
RegionInfoOther RegionInfo
'); out.print( p.getFirst().getRegionNameAsString() ); out.write("'); out.print( p.getFirst().getRegionNameAsString() ); out.write("'); out.print( p.getSecond().getRegionNameAsString() ); out.write("'); out.print( p.getSecond().getRegionNameAsString() ); out.write("
 \n "); } out.write("\n "); if (!report.getUnknownServers().isEmpty()) { out.write("\n
 \n
 \n


Unknown Servers

 \n
 \n
 \n


 \n The below are servers mentioned in the hbase:meta table that are no longer 'live' or known 'dead'.\n The server likely belongs to an older cluster epoch since replaced by a new instance because of a restart/crash.\n To clear 'Unknown Servers', run 'hbck2 scheduleRecoveries UNKNOWN_SERVERNAME'. This will schedule a ServerCrashProcedure.\n It will clear out 'Unknown Server' references and schedule reassigns of any Regions that were associated with this host.\n But first!, be sure the referenced Region is not currently stuck looping trying to OPEN. Does it show as a Region-In-Transition on the\n Master home page? Is it mentioned in the 'Procedures and Locks' Procedures list? If so, perhaps it stuck in a loop\n trying to OPEN but unable to because of a missing reference or file.\n"); out.write(" Read the Master log looking for the most recent\n mentions of the associated Region name. Try and address any such complaint first. If successful, a side-effect\n should be the clean up of the 'Unknown Servers' list. It may take a while. OPENs are retried forever but the interval\n between retries grows. The 'Unknown Server' may be cleared because it is just the last RegionServer the Region was\n successfully opened on; on the next open, the 'Unknown Server' will be purged.\n \n

 \n \n \n \n \n \n "); for (Pair p: report.getUnknownServers()) { out.write("\n \n \n \n \n "); } out.write("\n\n

"); out.print( report.getUnknownServers().size() ); out.write(" unknown servers(s).

 \n
RegionInfoServerName
'); out.print( p.getFirst().getRegionNameAsString() ); out.write(""); out.print( p.getSecond() ); out.write("
 \n "); } out.write("\n "); if (!report.getEmptyRegionInfo().isEmpty()) { out.write("\n
 \n
 \n


Empty info:regioninfo

 \n
 \n
 \n \n \n \n \n "); for (byte [] row: report.getEmptyRegionInfo()) { out.write("\n \n \n \n "); } out.write("\n\n

"); out.print( report.getEmptyRegionInfo().size() ); out.write(" emptyRegionInfo(s).

 \n
Row
"); out.print( Bytes.toStringBinary(row) ); out.write("
 \n "); } out.write("\n "); } out.write("\n\n "); } out.write("\n
 \n\n"); org.apache.jasper.runtime.JspRuntimeLibrary.include(request, response, "footer.jsp", out, false); out.write('\n'); out.write('\n'); out.write('\n'); } catch (Throwable t) { if (!(t instanceof SkipPageException)){ out = _jspx_out; if (out != null && out.getBufferSize() != 0) out.clearBuffer(); if (_jspx_page_context != null) _jspx_page_context.handlePageException(t); else throw new ServletException(t); } } finally { _jspxFactory.releasePageContext(_jspx_page_context); } } }




© 2015 - 2024 Weber Informatics LLC | Privacy Policy