/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.IOException;
import java.lang.management.ManagementFactory;
import java.lang.management.MemoryMXBean;
import java.lang.management.MemoryUsage;
import java.net.InetAddress;
import java.net.URLEncoder;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.servlet.jsp.JspWriter;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.ServletUtil;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.VersionInfo;
import org.znerd.xmlenc.*;
class NamenodeJspHelper {
static String getSafeModeText(FSNamesystem fsn) {
if (!fsn.isInSafeMode())
return "";
return "Safe mode is ON. " + fsn.getSafeModeTip() + "
";
}
  /**
   * Returns the security mode of the cluster (namenode).
   * @return "on" if security is on, and "off" otherwise
   */
static String getSecurityModeText() {
    if (UserGroupInformation.isSecurityEnabled()) {
      return "Security is <em>ON</em><br>";
    } else {
      return "Security is <em>OFF</em><br>";
    }
}
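  /**
   * Returns an HTML summary of the namespace object counts (files,
   * directories and blocks) and of JVM heap and non-heap memory usage.
   */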
static String getInodeLimitText(FSNamesystem fsn) {
long inodes = fsn.dir.totalInodes();
long blocks = fsn.getBlocksTotal();
long maxobjects = fsn.getMaxObjects();
MemoryMXBean mem = ManagementFactory.getMemoryMXBean();
MemoryUsage heap = mem.getHeapMemoryUsage();
long totalMemory = heap.getUsed();
long maxMemory = heap.getMax();
    long committedMemory = heap.getCommitted();
MemoryUsage nonHeap = mem.getNonHeapMemoryUsage();
long totalNonHeap = nonHeap.getUsed();
long maxNonHeap = nonHeap.getMax();
    long committedNonHeap = nonHeap.getCommitted();
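    // Usage percentages are computed against committed (not max) memory,
    // using integer division; committed sizes are assumed non-zero for a
    // running JVM, so no divide-by-zero guard is applied here.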
    long used = (totalMemory * 100) / committedMemory;
    long usedNonHeap = (totalNonHeap * 100) / committedNonHeap;
String str = inodes + " files and directories, " + blocks + " blocks = "
+ (inodes + blocks) + " total";
if (maxobjects != 0) {
long pct = ((inodes + blocks) * 100) / maxobjects;
str += " / " + maxobjects + " (" + pct + "%)";
}
str += ".
";
str += "Heap Memory used " + StringUtils.byteDesc(totalMemory) + " is "
+ " " + used + "% of Commited Heap Memory "
+ StringUtils.byteDesc(commitedMemory)
+ ". Max Heap Memory is " + StringUtils.byteDesc(maxMemory) +
".
";
str += "Non Heap Memory used " + StringUtils.byteDesc(totalNonHeap) + " is"
+ " " + usedNonHeap + "% of " + " Commited Non Heap Memory "
+ StringUtils.byteDesc(commitedNonHeap) + ". Max Non Heap Memory is "
+ StringUtils.byteDesc(maxNonHeap) + ".
";
return str;
}
static String getUpgradeStatusText(FSNamesystem fsn) {
String statusText = "";
try {
UpgradeStatusReport status = fsn
.distributedUpgradeProgress(UpgradeAction.GET_STATUS);
statusText = (status == null ? "There are no upgrades in progress."
: status.getStatusText(false));
} catch (IOException e) {
statusText = "Upgrade status unknown.";
}
return statusText;
}
/** Return a table containing version information. */
static String getVersionTable(FSNamesystem fsn) {
return ""
+ "\n Started: " + fsn.getStartTime()
+ " \n" + "\n Version: "
+ VersionInfo.getVersion() + ", " + VersionInfo.getRevision()
+ "\n Compiled: " + VersionInfo.getDate()
+ " by " + VersionInfo.getUser() + " from " + VersionInfo.getBranch()
+ "\n Upgrades: "
+ getUpgradeStatusText(fsn) + "\n
";
}
/**
* Generate warning text if there are corrupt files.
* @return a warning if files are corrupt, otherwise return an empty string.
*/
static String getCorruptFilesWarning(FSNamesystem fsn) {
long missingBlocks = fsn.getMissingBlocksCount();
if (missingBlocks > 0) {
StringBuilder result = new StringBuilder();
// Warning class is typically displayed in RED
result.append("
\n");
result.append("WARNING : There are " + missingBlocks
+ " missing blocks. Please check the logs or run fsck in order to identify the missing blocks.");
result.append("");
result.append("
See the Hadoop FAQ for common causes and potential solutions.");
result.append("
\n");
return result.toString();
}
return "";
}
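  /**
   * Renders the cluster health summary shown on the NameNode front page
   * (the dfshealth JSP).
   */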
static class HealthJsp {
private int rowNum = 0;
private int colNum = 0;
private String sorterField = null;
private String sorterOrder = null;
    private String rowTxt() {
      colNum = 0;
      return "<tr class=\"" + (((rowNum++) % 2 == 0) ? "rowNormal" : "rowAlt")
          + "\"> ";
    }
    private String colTxt() {
      return "<td id=\"col" + ++colNum + "\"> ";
    }
private void counterReset() {
colNum = 0;
rowNum = 0;
}
void generateConfReport(JspWriter out, NameNode nn,
HttpServletRequest request) throws IOException {
FSNamesystem fsn = nn.getNamesystem();
FSImage fsImage = fsn.getFSImage();
      List<StorageDirectory> removedStorageDirs = fsImage
          .getRemovedStorageDirs();
      // FS Image storage configuration
      out.print("<h3> " + nn.getRole() + " Storage: </h3>");
      out.print("<div id=\"dfstable\"> <table border=1 cellpadding=10 cellspacing=0 title=\"NameNode Storage\">\n"
          + "<thead><tr><td><b>Storage Directory</b></td><td><b>Type</b></td><td><b>State</b></td></tr></thead>");
      StorageDirectory st = null;
      for (Iterator<StorageDirectory> it = fsImage.dirIterator(); it.hasNext();) {
        st = it.next();
        String dir = "" + st.getRoot();
        String type = "" + st.getStorageDirType();
        out.print("<tr><td>" + dir + "</td><td>" + type
            + "</td><td>Active</td></tr>");
      }
      long storageDirsSize = removedStorageDirs.size();
      for (int i = 0; i < storageDirsSize; i++) {
        st = removedStorageDirs.get(i);
        String dir = "" + st.getRoot();
        String type = "" + st.getStorageDirType();
        out.print("<tr><td>" + dir + "</td><td>" + type
            + "</td><td><font color=red>Failed</font></td></tr>");
      }
      out.print("</table></div><br>\n");
}
void generateHealthReport(JspWriter out, NameNode nn,
HttpServletRequest request) throws IOException {
FSNamesystem fsn = nn.getNamesystem();
      ArrayList<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
      ArrayList<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
      fsn.DFSNodesStatus(live, dead);
      ArrayList<DatanodeDescriptor> decommissioning = fsn
          .getDecommissioningNodes();
sorterField = request.getParameter("sorter/field");
sorterOrder = request.getParameter("sorter/order");
if (sorterField == null)
sorterField = "name";
if (sorterOrder == null)
sorterOrder = "ASC";
// Find out common suffix. Should this be before or after the sort?
String port_suffix = null;
if (live.size() > 0) {
String name = live.get(0).getName();
int idx = name.indexOf(':');
if (idx > 0) {
port_suffix = name.substring(idx);
}
for (int i = 1; port_suffix != null && i < live.size(); i++) {
        if (!live.get(i).getName().endsWith(port_suffix)) {
port_suffix = null;
break;
}
}
}
counterReset();
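      // Capacity stats array, indices as consumed below:
      // [0] = configured capacity, [1] = DFS used, [2] = DFS remaining.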
long[] fsnStats = fsn.getStats();
long total = fsnStats[0];
long remaining = fsnStats[2];
long used = fsnStats[1];
long nonDFS = total - remaining - used;
nonDFS = nonDFS < 0 ? 0 : nonDFS;
float percentUsed = total <= 0 ? 0f : ((float) used * 100.0f)
/ (float) total;
float percentRemaining = total <= 0 ? 100f : ((float) remaining * 100.0f)
/ (float) total;
float median = 0;
float max = 0;
float min = 0;
float dev = 0;
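      // Min / median / max / standard deviation of per-datanode DFS usage,
      // computed over the live nodes only.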
if (live.size() > 0) {
float totalDfsUsed = 0;
float[] usages = new float[live.size()];
int i = 0;
for (DatanodeDescriptor dn : live) {
usages[i++] = dn.getDfsUsedPercent();
totalDfsUsed += dn.getDfsUsedPercent();
}
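        // totalDfsUsed accumulated the sum above; reduce it to the mean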
totalDfsUsed /= live.size();
Arrays.sort(usages);
median = usages[usages.length/2];
max = usages[usages.length - 1];
min = usages[0];
for (i = 0; i < usages.length; i++) {
dev += (usages[i] - totalDfsUsed) * (usages[i] - totalDfsUsed);
}
dev = (float) Math.sqrt(dev/usages.length);
}
out.print(" \n" + rowTxt() + colTxt()
+ "Configured Capacity" + colTxt() + ":" + colTxt()
+ StringUtils.byteDesc(total) + rowTxt() + colTxt() + "DFS Used"
+ colTxt() + ":" + colTxt() + StringUtils.byteDesc(used) + rowTxt()
+ colTxt() + "Non DFS Used" + colTxt() + ":" + colTxt()
+ StringUtils.byteDesc(nonDFS) + rowTxt() + colTxt()
+ "DFS Remaining" + colTxt() + ":" + colTxt()
+ StringUtils.byteDesc(remaining) + rowTxt() + colTxt() + "DFS Used%"
+ colTxt() + ":" + colTxt()
+ StringUtils.limitDecimalTo2(percentUsed) + " %" + rowTxt()
+ colTxt() + "DFS Remaining%" + colTxt() + ":" + colTxt()
+ StringUtils.limitDecimalTo2(percentRemaining) + " %"
+ rowTxt() + colTxt() + "DataNodes usages" + colTxt() + ":" + colTxt()
+ "Min %" + colTxt() + "Median %" + colTxt() + "Max %" + colTxt()
+ "stdev %" + rowTxt() + colTxt() + colTxt() + colTxt()
+ StringUtils.limitDecimalTo2(min) + " %"
+ colTxt() + StringUtils.limitDecimalTo2(median) + " %"
+ colTxt() + StringUtils.limitDecimalTo2(max) + " %"
+ colTxt() + StringUtils.limitDecimalTo2(dev) + " %"
+ rowTxt() + colTxt()
+ "Live Nodes "
+ colTxt() + ":" + colTxt() + live.size() + rowTxt() + colTxt()
+ "Dead Nodes "
+ colTxt() + ":" + colTxt() + dead.size() + rowTxt() + colTxt()
+ ""
+ "Decommissioning Nodes "
+ colTxt() + ":" + colTxt() + decommissioning.size()
+ rowTxt() + colTxt()
+ "Number of Under-Replicated Blocks" + colTxt() + ":" + colTxt()
+ fsn.getUnderReplicatedBlocks()
+ "
\n");
if (live.isEmpty() && dead.isEmpty()) {
out.print("There are no datanodes in the cluster");
}
}
}
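  /**
   * Obtain a delegation token for the user issuing this request, running
   * the RPC as that user, and return it in URL-encoded string form (or
   * null if no token was issued).
   */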
static String getDelegationToken(final NameNode nn,
HttpServletRequest request, Configuration conf) throws IOException,
InterruptedException {
final UserGroupInformation ugi = JspHelper.getUGI(request, conf);
    Token<DelegationTokenIdentifier> token = ugi
        .doAs(new PrivilegedExceptionAction<Token<DelegationTokenIdentifier>>() {
          public Token<DelegationTokenIdentifier> run() throws IOException {
            return nn.getDelegationToken(new Text(ugi.getUserName()));
          }
        });
return token == null ? null : token.encodeToUrlString();
}
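  /**
   * Redirect the browser to browseDirectory.jsp on a randomly chosen
   * datanode, falling back to the NameNode's own HTTP server when no
   * datanode is available, and propagating the delegation token (if any)
   * as a URL parameter.
   */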
static void redirectToRandomDataNode(final NameNode nn,
HttpServletRequest request,
HttpServletResponse resp,
Configuration conf
) throws IOException,
InterruptedException {
final DatanodeID datanode = nn.getNamesystem().getRandomDatanode();
    // if the user is defined, get a delegation token and stringify it
    String tokenString = getDelegationToken(nn, request, conf);
final String redirectLocation;
final String nodeToRedirect;
int redirectPort;
if (datanode != null) {
nodeToRedirect = datanode.getHost();
redirectPort = datanode.getInfoPort();
} else {
nodeToRedirect = nn.getHttpAddress().getHostName();
redirectPort = nn.getHttpAddress().getPort();
}
String fqdn = InetAddress.getByName(nodeToRedirect).getCanonicalHostName();
redirectLocation = "http://" + fqdn + ":" + redirectPort
+ "/browseDirectory.jsp?namenodeInfoPort="
+ nn.getHttpAddress().getPort() + "&dir=/"
+ (tokenString == null ? "" :
JspHelper.getDelegationTokenUrlParam(tokenString));
resp.sendRedirect(redirectLocation);
}
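  /**
   * Renders the datanode list page (dfsnodelist.jsp) for live, dead or
   * decommissioning nodes.
   */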
static class NodeListJsp {
private int rowNum = 0;
private long diskBytes = 1024 * 1024 * 1024;
private String diskByteStr = "GB";
private String sorterField = null;
private String sorterOrder = null;
private String whatNodes = "LIVE";
    private String rowTxt() {
      return "<tr class=\"" + (((rowNum++) % 2 == 0) ? "rowNormal" : "rowAlt")
          + "\"> ";
    }
private void counterReset() {
rowNum = 0;
}
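    // Builds the attribute string for a sortable column header: the CSS
    // class reflects the current sort (header/headerASC/headerDSC), and the
    // onClick handler reloads the page sorted on this column, toggling the
    // order when the column is already selected.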
private String nodeHeaderStr(String name) {
String ret = "class=header";
String order = "ASC";
if (name.equals(sorterField)) {
ret += sorterOrder;
if (sorterOrder.equals("ASC"))
order = "DSC";
}
ret += " onClick=\"window.document.location="
+ "'/dfsnodelist.jsp?whatNodes=" + whatNodes + "&sorter/field="
+ name + "&sorter/order=" + order
+ "'\" title=\"sort on this column\"";
return ret;
}
private void generateNodeDataHeader(JspWriter out, DatanodeDescriptor d,
String suffix, boolean alive, int nnHttpPort) throws IOException {
// from nn_browsedfscontent.jsp:
String url = "http://" + d.getHostName() + ":" + d.getInfoPort()
+ "/browseDirectory.jsp?namenodeInfoPort=" + nnHttpPort + "&dir="
+ URLEncoder.encode("/", "UTF-8");
String name = d.getHostName() + ":" + d.getPort();
if (!name.matches("\\d+\\.\\d+.\\d+\\.\\d+.*"))
name = name.replaceAll("\\.[^.:]*", "");
int idx = (suffix != null && name.endsWith(suffix)) ? name
.indexOf(suffix) : -1;
out.print(rowTxt() + ""
+ ((idx > 0) ? name.substring(0, idx) : name) + ""
+ ((alive) ? "" : "\n"));
}
void generateDecommissioningNodeData(JspWriter out, DatanodeDescriptor d,
String suffix, boolean alive, int nnHttpPort) throws IOException {
generateNodeDataHeader(out, d, suffix, alive, nnHttpPort);
if (!alive) {
return;
}
long decommRequestTime = d.decommissioningStatus.getStartTime();
long timestamp = d.getLastUpdate();
long currentTime = System.currentTimeMillis();
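      // elapsed time since decommissioning was requested, split into whole
      // hours (3,600,000 ms) and the leftover minutes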
long hoursSinceDecommStarted = (currentTime - decommRequestTime)/3600000;
long remainderMinutes = ((currentTime - decommRequestTime)/60000) % 60;
out.print(" "
+ ((currentTime - timestamp) / 1000)
+ " "
+ d.decommissioningStatus.getUnderReplicatedBlocks()
+ " "
+ d.decommissioningStatus.getDecommissionOnlyReplicas()
+ " "
+ d.decommissioningStatus.getUnderReplicatedInOpenFiles()
+ " "
+ hoursSinceDecommStarted + " hrs " + remainderMinutes + " mins"
+ "\n");
}
void generateNodeData(JspWriter out, DatanodeDescriptor d,
String suffix, boolean alive, int nnHttpPort) throws IOException {
/*
* Say the datanode is dn1.hadoop.apache.org with ip 192.168.0.5 we use:
* 1) d.getHostName():d.getPort() to display. Domain and port are stripped
* if they are common across the nodes. i.e. "dn1"
* 2) d.getHost():d.Port() for "title". i.e. "192.168.0.5:50010"
* 3) d.getHostName():d.getInfoPort() for url.
* i.e. "http://dn1.hadoop.apache.org:50075/..."
* Note that "d.getHost():d.getPort()" is what DFS clients use to
* interact with datanodes.
*/
generateNodeDataHeader(out, d, suffix, alive, nnHttpPort);
if (!alive)
return;
long c = d.getCapacity();
long u = d.getDfsUsed();
long nu = d.getNonDfsUsed();
long r = d.getRemaining();
String percentUsed = StringUtils.limitDecimalTo2(d.getDfsUsedPercent());
String percentRemaining = StringUtils.limitDecimalTo2(d
.getRemainingPercent());
String adminState = (d.isDecommissioned() ? "Decommissioned" : (d
.isDecommissionInProgress() ? "Decommission In Progress"
: "In Service"));
long timestamp = d.getLastUpdate();
long currentTime = System.currentTimeMillis();
out.print(" "
+ ((currentTime - timestamp) / 1000)
+ " "
+ adminState
+ " "
+ StringUtils.limitDecimalTo2(c * 1.0 / diskBytes)
+ " "
+ StringUtils.limitDecimalTo2(u * 1.0 / diskBytes)
+ " "
+ StringUtils.limitDecimalTo2(nu * 1.0 / diskBytes)
+ " "
+ StringUtils.limitDecimalTo2(r * 1.0 / diskBytes)
+ " "
+ percentUsed
+ " "
+ ServletUtil.percentageGraph((int) Double.parseDouble(percentUsed),
100) + " "
+ percentRemaining + " " + d.numBlocks()
+ " "
+ d.getVolumeFailures() + "\n");
}
void generateNodesList(JspWriter out, NameNode nn,
HttpServletRequest request) throws IOException {
      ArrayList<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
      ArrayList<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
      nn.getNamesystem().DFSNodesStatus(live, dead);
whatNodes = request.getParameter("whatNodes"); // show only live or only
// dead nodes
sorterField = request.getParameter("sorter/field");
sorterOrder = request.getParameter("sorter/order");
if (sorterField == null)
sorterField = "name";
if (sorterOrder == null)
sorterOrder = "ASC";
JspHelper.sortNodeList(live, sorterField, sorterOrder);
JspHelper.sortNodeList(dead, "name", "ASC");
// Find out common suffix. Should this be before or after the sort?
String port_suffix = null;
if (live.size() > 0) {
String name = live.get(0).getName();
int idx = name.indexOf(':');
if (idx > 0) {
port_suffix = name.substring(idx);
}
for (int i = 1; port_suffix != null && i < live.size(); i++) {
        if (!live.get(i).getName().endsWith(port_suffix)) {
port_suffix = null;
break;
}
}
}
counterReset();
      try {
        Thread.sleep(1000);
      } catch (InterruptedException e) {
        // restore the interrupt status instead of swallowing it
        Thread.currentThread().interrupt();
      }
if (live.isEmpty() && dead.isEmpty()) {
out.print("There are no datanodes in the cluster");
} else {
int nnHttpPort = nn.getHttpAddress().getPort();
out.print(" ");
if (whatNodes.equals("LIVE")) {
out.print("" + "Live Datanodes : "
+ live.size() + ""
+ "
\n\n");
counterReset();
if (live.size() > 0) {
if (live.get(0).getCapacity() > 1024 * diskBytes) {
diskBytes *= 1024;
diskByteStr = "TB";
}
out.print(" Node Last
Contact Admin State Configured
Capacity (" + diskByteStr + ") Used
(" + diskByteStr
+ ") Non DFS
Used (" + diskByteStr + ") Remaining
("
+ diskByteStr + ") Used
(%) Used
(%) Remaining
(%) Blocks Failed Volumes\n");
JspHelper.sortNodeList(live, sorterField, sorterOrder);
for (int i = 0; i < live.size(); i++) {
generateNodeData(out, live.get(i), port_suffix, true, nnHttpPort);
}
}
out.print("
\n");
} else if (whatNodes.equals("DEAD")) {
out.print("
"
+ " Dead Datanodes : " + dead.size() + "
\n");
if (dead.size() > 0) {
out.print(" "
+ " Node \n");
JspHelper.sortNodeList(dead, "name", "ASC");
for (int i = 0; i < dead.size(); i++) {
generateNodeData(out, dead.get(i), port_suffix, false, nnHttpPort);
}
out.print("
\n");
}
} else if (whatNodes.equals("DECOMMISSIONING")) {
// Decommissioning Nodes
ArrayList decommissioning = nn.getNamesystem()
.getDecommissioningNodes();
out.print("
"
+ " Decommissioning Datanodes : " + decommissioning.size()
+ "
\n");
if (decommissioning.size() > 0) {
out.print(" "
+ " Node Last
Contact Under Replicated Blocks Blocks With No
Live Replicas Under Replicated Blocks
In Files Under Construction"
+ " Time Since Decommissioning Started"
);
JspHelper.sortNodeList(decommissioning, "name", "ASC");
for (int i = 0; i < decommissioning.size(); i++) {
generateDecommissioningNodeData(out, decommissioning.get(i),
port_suffix, true, nnHttpPort);
}
out.print("
\n");
}
}
out.print(" ");
}
}
}
// utility class used in block_info_xml.jsp
static class XMLBlockInfo {
final Block block;
final INodeFile inode;
final FSNamesystem fsn;
public XMLBlockInfo(FSNamesystem fsn, Long blockId) {
this.fsn = fsn;
if (blockId == null) {
this.block = null;
this.inode = null;
} else {
this.block = new Block(blockId);
this.inode = fsn.blockManager.getINode(block);
}
}
private String getLocalParentDir(INode inode) {
StringBuilder pathBuf = new StringBuilder();
INode node = inode;
// loop up to directory root, prepending each directory name to buffer
while ((node = node.getParent()) != null && node.getLocalName() != "") {
pathBuf.insert(0, '/').insert(0, node.getLocalName());
}
return pathBuf.toString();
}
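    // Writes this block's info as XML. Illustrative shape of the output
    // (element names match the startTag() calls below):
    //   <block_info>
    //     <block_id>...</block_id>
    //     <block_name>blk_...</block_name>
    //     <file>...</file>          (only when the block belongs to a file)
    //     <replicas>
    //       <replica><host_name>...</host_name><is_corrupt>...</is_corrupt></replica>
    //     </replicas>
    //   </block_info>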
public void toXML(XMLOutputter doc) throws IOException {
doc.startTag("block_info");
if (block == null) {
doc.startTag("error");
doc.pcdata("blockId must be a Long");
doc.endTag();
      } else {
doc.startTag("block_id");
doc.pcdata(""+block.getBlockId());
doc.endTag();
doc.startTag("block_name");
doc.pcdata(block.getBlockName());
doc.endTag();
if (inode != null) {
doc.startTag("file");
doc.startTag("local_name");
doc.pcdata(inode.getLocalName());
doc.endTag();
doc.startTag("local_directory");
doc.pcdata(getLocalParentDir(inode));
doc.endTag();
doc.startTag("user_name");
doc.pcdata(inode.getUserName());
doc.endTag();
doc.startTag("group_name");
doc.pcdata(inode.getGroupName());
doc.endTag();
doc.startTag("is_directory");
doc.pcdata(""+inode.isDirectory());
doc.endTag();
doc.startTag("access_time");
doc.pcdata(""+inode.getAccessTime());
doc.endTag();
doc.startTag("is_under_construction");
doc.pcdata(""+inode.isUnderConstruction());
doc.endTag();
doc.startTag("ds_quota");
doc.pcdata(""+inode.getDsQuota());
doc.endTag();
doc.startTag("permission_status");
doc.pcdata(inode.getPermissionStatus().toString());
doc.endTag();
doc.startTag("replication");
doc.pcdata(""+inode.getReplication());
doc.endTag();
doc.startTag("disk_space_consumed");
doc.pcdata(""+inode.diskspaceConsumed());
doc.endTag();
doc.startTag("preferred_block_size");
doc.pcdata(""+inode.getPreferredBlockSize());
doc.endTag();
          doc.endTag(); // </file>
}
doc.startTag("replicas");
if (fsn.blockManager.blocksMap.contains(block)) {
        Iterator<DatanodeDescriptor> it =
            fsn.blockManager.blocksMap.nodeIterator(block);
while (it.hasNext()) {
doc.startTag("replica");
DatanodeDescriptor dd = it.next();
doc.startTag("host_name");
doc.pcdata(dd.getHostName());
doc.endTag();
boolean isCorrupt = fsn.getCorruptReplicaBlockIds(0,
block.getBlockId()) != null;
doc.startTag("is_corrupt");
doc.pcdata(""+isCorrupt);
doc.endTag();
            doc.endTag(); // </replica>
}
}
        doc.endTag(); // </replicas>
}
      doc.endTag(); // </block_info>
}
}
// utility class used in corrupt_replicas_xml.jsp
static class XMLCorruptBlockInfo {
final FSNamesystem fsn;
final Configuration conf;
final Long startingBlockId;
final int numCorruptBlocks;
public XMLCorruptBlockInfo(FSNamesystem fsn, Configuration conf,
int numCorruptBlocks, Long startingBlockId) {
this.fsn = fsn;
this.conf = conf;
this.numCorruptBlocks = numCorruptBlocks;
this.startingBlockId = startingBlockId;
}
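    // Writes the corrupt-replica report as XML. Illustrative shape:
    //   <corrupt_block_info>
    //     <dfs_replication>3</dfs_replication>
    //     <num_missing_blocks>...</num_missing_blocks>
    //     <num_corrupt_replica_blocks>...</num_corrupt_replica_blocks>
    //     <corrupt_replica_block_ids><block_id>...</block_id></corrupt_replica_block_ids>
    //   </corrupt_block_info>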
public void toXML(XMLOutputter doc) throws IOException {
doc.startTag("corrupt_block_info");
if (numCorruptBlocks < 0 || numCorruptBlocks > 100) {
doc.startTag("error");
doc.pcdata("numCorruptBlocks must be >= 0 and <= 100");
doc.endTag();
}
doc.startTag("dfs_replication");
doc.pcdata(""+conf.getInt("dfs.replication", 3));
doc.endTag();
doc.startTag("num_missing_blocks");
doc.pcdata(""+fsn.getMissingBlocksCount());
doc.endTag();
doc.startTag("num_corrupt_replica_blocks");
doc.pcdata(""+fsn.getCorruptReplicaBlocks());
doc.endTag();
doc.startTag("corrupt_replica_block_ids");
long[] corruptBlockIds
= fsn.getCorruptReplicaBlockIds(numCorruptBlocks,
startingBlockId);
if (corruptBlockIds != null) {
for (Long blockId: corruptBlockIds) {
doc.startTag("block_id");
doc.pcdata(""+blockId);
doc.endTag();
}
}
      doc.endTag(); // </corrupt_replica_block_ids>
      doc.endTag(); // </corrupt_block_info>
doc.getWriter().flush();
}
}
}