/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.hdfs.DFSUtil.percent2String;
import java.io.IOException;
import java.lang.management.ManagementFactory;
import java.lang.management.MemoryMXBean;
import java.lang.management.MemoryUsage;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.URI;
import java.net.URLEncoder;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import javax.servlet.ServletContext;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.servlet.jsp.JspWriter;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
import org.apache.hadoop.hdfs.server.namenode.JournalSet.JournalAndStream;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.net.NodeBase;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.ServletUtil;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.util.VersionInfo;
import org.znerd.xmlenc.XMLOutputter;
import com.google.common.base.Preconditions;
class NamenodeJspHelper {
static String fraction2String(double value) {
return StringUtils.format("%.2f", value);
}
static String fraction2String(long numerator, long denominator) {
return fraction2String(numerator/(double)denominator);
}
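// Example (hypothetical values): fraction2String(1, 3) returns "0.33"
// and fraction2String(1, 2) returns "0.50".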
static String getSafeModeText(FSNamesystem fsn) {
if (!fsn.isInSafeMode())
return "";
return "Safe mode is ON. " + fsn.getSafeModeTip() + " ";
}
/**
* returns security mode of the cluster (namenode)
* @return "on" if security is on, and "off" otherwise
*/
static String getSecurityModeText() {
if(UserGroupInformation.isSecurityEnabled()) {
return "
Security is ON
";
} else {
return "
Security is OFF
";
}
}
static String getInodeLimitText(FSNamesystem fsn) {
long inodes = fsn.dir.totalInodes();
long blocks = fsn.getBlocksTotal();
long maxobjects = fsn.getMaxObjects();
MemoryMXBean mem = ManagementFactory.getMemoryMXBean();
MemoryUsage heap = mem.getHeapMemoryUsage();
long totalMemory = heap.getUsed();
long maxMemory = heap.getMax();
long commitedMemory = heap.getCommitted();
MemoryUsage nonHeap = mem.getNonHeapMemoryUsage();
long totalNonHeap = nonHeap.getUsed();
long maxNonHeap = nonHeap.getMax();
long commitedNonHeap = nonHeap.getCommitted();
long used = (totalMemory * 100) / commitedMemory;
long usedNonHeap = (totalNonHeap * 100) / commitedNonHeap;
String str = "
" + VersionInfo.getDate()
+ " by " + VersionInfo.getUser() + " from " + VersionInfo.getBranch()
+ "
\n
Upgrades:
"
+ getUpgradeStatusText(fsn)
+ "
\n
Cluster ID:
" + fsn.getClusterId()
+ "
\n
Block Pool ID:
" + fsn.getBlockPoolId()
+ "
\n
";
}
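/**
 * A minimal sketch, not part of the original class: the integer-percent
 * arithmetic used by getInodeLimitText() above, with a zero guard that the
 * original omits (committed heap memory is never zero in practice, so the
 * unconditional division above is safe for the heap case).
 */
private static long usedPercent(long usedBytes, long committedBytes) {
return committedBytes == 0 ? 0 : (usedBytes * 100) / committedBytes;
}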
/**
* Generate warning text if there are corrupt files.
* @return a warning if files are corrupt, otherwise return an empty string.
*/
static String getCorruptFilesWarning(FSNamesystem fsn) {
long missingBlocks = fsn.getMissingBlocksCount();
if (missingBlocks > 0) {
StringBuilder result = new StringBuilder();
// Warning class is typically displayed in RED.
result.append("
");
StorageDirectory st = null;
for (Iterator it
= fsImage.getStorage().dirIterator(); it.hasNext();) {
st = it.next();
String dir = "" + st.getRoot();
String type = "" + st.getStorageDirType();
out.print("
" + dir + "
" + type
+ "
Active
");
}
long storageDirsSize = removedStorageDirs.size();
for (int i = 0; i < storageDirsSize; i++) {
st = removedStorageDirs.get(i);
String dir = "" + st.getRoot();
String type = "" + st.getStorageDirType();
out.print("
" + dir + "
" + type
+ "
Failed
");
}
out.print("
\n");
}
/**
* Generate an HTML report containing the current status of the HDFS
* journals.
*/
void generateJournalReport(JspWriter out, NameNode nn,
HttpServletRequest request) throws IOException {
FSEditLog log = nn.getFSImage().getEditLog();
Preconditions.checkArgument(log != null, "no edit log set in %s", nn);
out.println("
"
+ hoursSinceDecommStarted + " hrs " + remainderMinutes + " mins"
+ "\n");
}
void generateNodeData(JspWriter out, DatanodeDescriptor d, String suffix,
boolean alive, int nnHttpPort, String nnaddr) throws IOException {
/*
* Say the datanode is dn1.hadoop.apache.org with ip 192.168.0.5 we use:
* 1) d.getHostName():d.getPort() to display. Domain and port are stripped
* if they are common across the nodes. i.e. "dn1"
* 2) d.getHost():d.getPort() for "title", i.e. "192.168.0.5:50010"
* 3) d.getHostName():d.getInfoPort() for url.
* i.e. "http://dn1.hadoop.apache.org:50075/..."
* Note that "d.getHost():d.getPort()" is what DFS clients use to
* interact with datanodes.
*/
generateNodeDataHeader(out, d, suffix, alive, nnHttpPort, nnaddr);
if (!alive) {
out.print("
" +
d.isDecommissioned() + "\n");
return;
}
long c = d.getCapacity();
long u = d.getDfsUsed();
long nu = d.getNonDfsUsed();
long r = d.getRemaining();
final double percentUsedValue = d.getDfsUsedPercent();
String percentUsed = fraction2String(percentUsedValue);
String percentRemaining = fraction2String(d.getRemainingPercent());
String adminState = d.getAdminState().toString();
long timestamp = d.getLastUpdate();
long currentTime = Time.now();
long bpUsed = d.getBlockPoolUsed();
String percentBpUsed = fraction2String(d.getBlockPoolUsedPercent());
out.print("
"
+ d.getVolumeFailures() + "\n");
}
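/**
 * Hypothetical helper, not in the original class, illustrating the naming
 * convention described in generateNodeData(): when every datanode shares a
 * common ":port" suffix, that suffix is stripped from the displayed name
 * (e.g. "dn1.hadoop.apache.org:50010" with common suffix ":50010" displays
 * as "dn1.hadoop.apache.org").
 */
private static String stripCommonSuffix(String xferAddr, String commonSuffix) {
if (commonSuffix != null && xferAddr.endsWith(commonSuffix)) {
return xferAddr.substring(0, xferAddr.length() - commonSuffix.length());
}
return xferAddr;
}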
void generateNodesList(ServletContext context, JspWriter out,
HttpServletRequest request) throws IOException {
final NameNode nn = NameNodeHttpServer.getNameNodeFromContext(context);
final FSNamesystem ns = nn.getNamesystem();
final DatanodeManager dm = ns.getBlockManager().getDatanodeManager();
final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
final List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
dm.fetchDatanodes(live, dead, true);
InetSocketAddress nnSocketAddress = (InetSocketAddress) context
.getAttribute(NameNodeHttpServer.NAMENODE_ADDRESS_ATTRIBUTE_KEY);
String nnaddr = nnSocketAddress.getAddress().getHostAddress() + ":"
+ nnSocketAddress.getPort();
// Show only live or only dead nodes.
String whatNodes = request.getParameter("whatNodes");
if (null == whatNodes || whatNodes.isEmpty()) {
out.print("Invalid input");
return;
}
String sorterField = request.getParameter("sorter/field");
String sorterOrder = request.getParameter("sorter/order");
if (sorterField == null)
sorterField = "name";
if (sorterOrder == null)
sorterOrder = "ASC";
JspHelper.sortNodeList(live, sorterField, sorterOrder);
// Find out common suffix. Should this be before or after the sort?
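// e.g. if the live nodes report "dn1:50010" and "dn2:50010", the common
// suffix is ":50010"; a single "dn3:50020" resets port_suffix to null
// (hypothetical addresses).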
String port_suffix = null;
if (live.size() > 0) {
String name = live.get(0).getXferAddr();
int idx = name.indexOf(':');
if (idx > 0) {
port_suffix = name.substring(idx);
}
for (int i = 1; port_suffix != null && i < live.size(); i++) {
if (!live.get(i).getXferAddr().endsWith(port_suffix)) {
port_suffix = null;
break;
}
}
}
counterReset();
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
Thread.currentThread().interrupt(); // preserve the interrupt rather than swallowing it
}
if (live.isEmpty() && dead.isEmpty()) {
out.print("There are no datanodes in the cluster");
} else {
int nnHttpPort = nn.getHttpAddress().getPort();
out.print("