/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.URL;
import java.net.URLEncoder;
import java.security.PrivilegedExceptionAction;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.List;
import javax.servlet.ServletContext;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.servlet.jsp.JspWriter;
import org.apache.commons.lang.StringEscapeUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.http.HtmlQuoting;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.ServletUtil;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.VersionInfo;
import com.google.common.base.Predicate;
import com.google.common.collect.Iterables;
@InterfaceAudience.Private
public class DatanodeJspHelper {
private static final int PREV_BLOCK = -1;
private static final int NEXT_BLOCK = 1;
private static DFSClient getDFSClient(final UserGroupInformation user,
final String addr,
final Configuration conf
) throws IOException,
InterruptedException {
return
    user.doAs(new PrivilegedExceptionAction<DFSClient>() {
@Override
public DFSClient run() throws IOException {
return new DFSClient(NetUtils.createSocketAddr(addr), conf);
}
});
}
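
  // Usage sketch (illustrative only; the namenode address below is a made-up
  // example): callers resolve the UGI from the request and close the client
  // when done.
  //
  //   UserGroupInformation ugi = JspHelper.getUGI(req, conf);
  //   DFSClient dfs = getDFSClient(ugi, "nn.example.com:8020", conf);
  //   try { /* issue RPCs as the request's user */ } finally { dfs.close(); }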
/**
* Get the default chunk size.
* @param conf the configuration
* @return the number of bytes to chunk in
*/
private static int getDefaultChunkSize(Configuration conf) {
return conf.getInt(DFSConfigKeys.DFS_DEFAULT_CHUNK_VIEW_SIZE_KEY,
DFSConfigKeys.DFS_DEFAULT_CHUNK_VIEW_SIZE_DEFAULT);
}
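
  // Illustrative sketch: the view size is an ordinary Configuration knob, so
  // a deployment could override it before this helper reads it (the value
  // here is hypothetical).
  //
  //   conf.setInt(DFSConfigKeys.DFS_DEFAULT_CHUNK_VIEW_SIZE_KEY, 64 * 1024);
  //   int chunk = getDefaultChunkSize(conf); // 65536 instead of the default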
static void generateDirectoryStructure(JspWriter out,
HttpServletRequest req,
HttpServletResponse resp,
Configuration conf
) throws IOException,
InterruptedException {
final String dir = JspHelper.validatePath(
StringEscapeUtils.unescapeHtml(req.getParameter("dir")));
if (dir == null) {
out.print("Invalid input");
return;
}
String tokenString = req.getParameter(JspHelper.DELEGATION_PARAMETER_NAME);
UserGroupInformation ugi = JspHelper.getUGI(req, conf);
String namenodeInfoPortStr = req.getParameter("namenodeInfoPort");
int namenodeInfoPort = -1;
if (namenodeInfoPortStr != null)
namenodeInfoPort = Integer.parseInt(namenodeInfoPortStr);
final String nnAddr = req.getParameter(JspHelper.NAMENODE_ADDRESS);
if (nnAddr == null){
out.print(JspHelper.NAMENODE_ADDRESS + " url param is null");
return;
}
DFSClient dfs = getDFSClient(ugi, nnAddr, conf);
String target = dir;
final HdfsFileStatus targetStatus = dfs.getFileInfo(target);
    if (targetStatus == null) { // target does not exist
      out.print("<h3>File or directory : "
          + StringEscapeUtils.escapeHtml(target) + " does not exist</h3>");
    }
    dfs.close();
  }

  static void generateFileChunksForTail(JspWriter out, HttpServletRequest req,
      Configuration conf) throws IOException, InterruptedException {
    final String referrer = req.getParameter("referrer");
    final boolean noLink = (referrer == null); // no back link without referrer
    final String filename =
        StringEscapeUtils.unescapeHtml(req.getParameter("filename"));
    if (filename == null) {
      out.print("Invalid input (file name absent)");
      return;
    }
    final UserGroupInformation ugi = JspHelper.getUGI(req, conf);
    final String nnAddr = req.getParameter(JspHelper.NAMENODE_ADDRESS);
    final int chunkSizeToView = JspHelper.string2ChunkSizeToView(
        req.getParameter("chunkSizeToView"), getDefaultChunkSize(conf));
    out.print("Chunk size to view (in bytes, up to file's DFS block size): ");
    out.print("<input type=\"text\" name=\"chunkSizeToView\" value="
        + chunkSizeToView + " size=10 maxlength=10>");
    out.print("<input type=\"hidden\" name=\"filename\" value=\"" + filename
        + "\">");
    if (!noLink)
      out.print("<input type=\"hidden\" name=\"referrer\" value=\"" + referrer
          + "\">");
    // fetch the block from the datanode that has the last block for this file
    final DFSClient dfs = getDFSClient(ugi, nnAddr, conf);
    List<LocatedBlock> blocks = dfs.getNamenode().getBlockLocations(filename, 0,
Long.MAX_VALUE).getLocatedBlocks();
if (blocks == null || blocks.size() == 0) {
out.print("No datanodes contain blocks of file " + filename);
dfs.close();
return;
}
LocatedBlock lastBlk = blocks.get(blocks.size() - 1);
String poolId = lastBlk.getBlock().getBlockPoolId();
long blockSize = lastBlk.getBlock().getNumBytes();
long blockId = lastBlk.getBlock().getBlockId();
    Token<BlockTokenIdentifier> accessToken = lastBlk.getBlockToken();
long genStamp = lastBlk.getBlock().getGenerationStamp();
DatanodeInfo chosenNode;
try {
chosenNode = JspHelper.bestNode(lastBlk, conf);
} catch (IOException e) {
out.print(e.toString());
dfs.close();
return;
}
InetSocketAddress addr =
NetUtils.createSocketAddr(chosenNode.getXferAddr());
// view the last chunkSizeToView bytes while Tailing
final long startOffset = blockSize >= chunkSizeToView ? blockSize
- chunkSizeToView : 0;
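    // Worked example (assumes the stock 32 KB default view size): a 128 MB
    // last block gives startOffset = 134217728 - 32768 = 134184960, so only
    // the final 32 KB are streamed; a block shorter than chunkSizeToView is
    // shown from offset 0.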
out.print("");
dfs.close();
}
  /** Get a DFSClient to the namenode named by the request's
   *  NAMENODE_ADDRESS parameter. */
public static DFSClient getDFSClient(final HttpServletRequest request,
final DataNode datanode, final Configuration conf,
final UserGroupInformation ugi) throws IOException, InterruptedException {
final String nnAddr = request.getParameter(JspHelper.NAMENODE_ADDRESS);
return getDFSClient(ugi, nnAddr, conf);
}
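
  // Caller sketch for a datanode JSP (names such as "path" are hypothetical):
  //
  //   UserGroupInformation ugi = JspHelper.getUGI(request, conf);
  //   DFSClient client = getDFSClient(request, datanode, conf, ugi);
  //   try { client.getFileInfo(path); } finally { client.close(); }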
/** Return a table containing version information. */
public static String getVersionTable(ServletContext context) {
StringBuilder sb = new StringBuilder();
final DataNode dataNode = (DataNode) context.getAttribute("datanode");
sb.append("