/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.http.server;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.BlockStoragePolicySpi;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FsStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FilterFileSystem;
import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.GlobFilter;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.fs.QuotaUsage;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.fs.XAttrCodec;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
import org.apache.hadoop.fs.http.client.HttpFSFileSystem.FILE_TYPE;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReportListing;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.protocol.SnapshotStatus;
import org.apache.hadoop.hdfs.web.JsonUtil;
import org.apache.hadoop.lib.service.FileSystemAccess;
import org.apache.hadoop.util.StringUtils;
import org.json.simple.JSONArray;
import org.json.simple.JSONObject;
import org.apache.hadoop.fs.permission.FsCreateModes;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.Collection;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.TreeMap;
import static org.apache.hadoop.hdfs.DFSConfigKeys.HTTPFS_BUFFER_SIZE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.HTTP_BUFFER_SIZE_DEFAULT;
/**
* FileSystem operation executors used by {@link HttpFSServer}.
*/
@InterfaceAudience.Private
public final class FSOperations {
private static int bufferSize = 4096;
private FSOperations() {
// not called
}
/**
* Set the buffer size. The size is set during the initialization of
* HttpFSServerWebApp.
* @param conf the configuration to get the bufferSize
*/
public static void setBufferSize(Configuration conf) {
bufferSize = conf.getInt(HTTPFS_BUFFER_SIZE_KEY,
HTTP_BUFFER_SIZE_DEFAULT);
}
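  // Illustrative only (a sketch, not from this file): assuming
  // HTTPFS_BUFFER_SIZE_KEY resolves to "httpfs.buffer.size", the buffer can
  // be tuned in the HttpFS configuration like so:
  //
  //   <property>
  //     <name>httpfs.buffer.size</name>
  //     <value>8192</value>
  //   </property>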
/**
* @param fileStatus a FileStatus object
* @return JSON map suitable for wire transport
*/
  private static Map<String, Object> toJson(FileStatus fileStatus) {
    Map<String, Object> json = new LinkedHashMap<>();
json.put(HttpFSFileSystem.FILE_STATUS_JSON, toJsonInner(fileStatus, true));
return json;
}
/**
* @param fileStatuses list of FileStatus objects
 * @param isFile whether the file statuses are from a file path
* @return JSON map suitable for wire transport
*/
@SuppressWarnings({"unchecked"})
  private static Map<String, Object> toJson(FileStatus[] fileStatuses,
      boolean isFile) {
    Map<String, Object> json = new LinkedHashMap<>();
    Map<String, Object> inner = new LinkedHashMap<>();
JSONArray statuses = new JSONArray();
for (FileStatus f : fileStatuses) {
statuses.add(toJsonInner(f, isFile));
}
inner.put(HttpFSFileSystem.FILE_STATUS_JSON, statuses);
json.put(HttpFSFileSystem.FILE_STATUSES_JSON, inner);
return json;
}
/**
* Not meant to be called directly except by the other toJson functions.
*/
  private static Map<String, Object> toJsonInner(FileStatus fileStatus,
      boolean emptyPathSuffix) {
    Map<String, Object> json = new LinkedHashMap<>();
json.put(HttpFSFileSystem.PATH_SUFFIX_JSON,
(emptyPathSuffix) ? "" : fileStatus.getPath().getName());
FILE_TYPE fileType = HttpFSFileSystem.FILE_TYPE.getType(fileStatus);
json.put(HttpFSFileSystem.TYPE_JSON, fileType.toString());
if (fileType.equals(FILE_TYPE.SYMLINK)) {
// put the symlink into Json
try {
json.put(HttpFSFileSystem.SYMLINK_JSON,
fileStatus.getSymlink().getName());
} catch (IOException e) {
// Can't happen.
}
}
json.put(HttpFSFileSystem.LENGTH_JSON, fileStatus.getLen());
json.put(HttpFSFileSystem.OWNER_JSON, fileStatus.getOwner());
json.put(HttpFSFileSystem.GROUP_JSON, fileStatus.getGroup());
json.put(HttpFSFileSystem.PERMISSION_JSON,
HttpFSFileSystem.permissionToString(fileStatus.getPermission()));
json.put(HttpFSFileSystem.ACCESS_TIME_JSON, fileStatus.getAccessTime());
json.put(HttpFSFileSystem.MODIFICATION_TIME_JSON,
fileStatus.getModificationTime());
json.put(HttpFSFileSystem.BLOCK_SIZE_JSON, fileStatus.getBlockSize());
json.put(HttpFSFileSystem.REPLICATION_JSON, fileStatus.getReplication());
if (fileStatus instanceof HdfsFileStatus) {
// Add HDFS-specific fields to response
HdfsFileStatus hdfsFileStatus = (HdfsFileStatus) fileStatus;
json.put(HttpFSFileSystem.CHILDREN_NUM_JSON,
hdfsFileStatus.getChildrenNum());
json.put(HttpFSFileSystem.FILE_ID_JSON,
hdfsFileStatus.getFileId());
json.put(HttpFSFileSystem.STORAGEPOLICY_JSON,
hdfsFileStatus.getStoragePolicy());
if (hdfsFileStatus.getErasureCodingPolicy() != null) {
json.put(HttpFSFileSystem.ECPOLICYNAME_JSON,
hdfsFileStatus.getErasureCodingPolicy().getName());
json.put(HttpFSFileSystem.ECPOLICY_JSON,
JsonUtil.getEcPolicyAsMap(
hdfsFileStatus.getErasureCodingPolicy()));
}
}
if (fileStatus.getPermission().getAclBit()) {
json.put(HttpFSFileSystem.ACL_BIT_JSON, true);
}
if (fileStatus.getPermission().getEncryptedBit()) {
json.put(HttpFSFileSystem.ENC_BIT_JSON, true);
}
if (fileStatus.getPermission().getErasureCodedBit()) {
json.put(HttpFSFileSystem.EC_BIT_JSON, true);
}
if (fileStatus.isSnapshotEnabled()) {
json.put(HttpFSFileSystem.SNAPSHOT_BIT_JSON, true);
}
return json;
}
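  // For illustration only: for a regular file, the inner map built above
  // serializes along these lines (HDFS-specific fields and the ACL/enc/EC/
  // snapshot bits appear only when applicable):
  //
  //   {"pathSuffix": "", "type": "FILE", "length": 1024,
  //    "owner": "hdfs", "group": "supergroup", "permission": "644",
  //    "accessTime": 1560000000000, "modificationTime": 1560000000000,
  //    "blockSize": 134217728, "replication": 3}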
/**
* Serializes a DirectoryEntries object into the JSON for a
* WebHDFS {@link org.apache.hadoop.hdfs.protocol.DirectoryListing}.
*
* These two classes are slightly different, due to the impedance
* mismatches between the WebHDFS and FileSystem APIs.
 * @param entries directory entries to serialize
 * @param isFile whether the entries are from a file path
 * @return JSON map suitable for wire transport
*/
  private static Map<String, Object> toJson(FileSystem.DirectoryEntries
      entries, boolean isFile) {
    Map<String, Object> json = new LinkedHashMap<>();
    Map<String, Object> inner = new LinkedHashMap<>();
    Map<String, Object> fileStatuses = toJson(entries.getEntries(), isFile);
inner.put(HttpFSFileSystem.PARTIAL_LISTING_JSON, fileStatuses);
inner.put(HttpFSFileSystem.REMAINING_ENTRIES_JSON, entries.hasMore() ? 1
: 0);
json.put(HttpFSFileSystem.DIRECTORY_LISTING_JSON, inner);
return json;
}
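  // Illustrative shape of the result, assuming the WebHDFS key names behind
  // the HttpFSFileSystem constants used above:
  //
  //   {"DirectoryListing": {
  //      "partialListing": {"FileStatuses": {"FileStatus": [ ... ]}},
  //      "remainingEntries": 0}}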
/** Converts an AclStatus object into a JSON object.
*
* @param aclStatus AclStatus object
*
* @return The JSON representation of the ACLs for the file
*/
@SuppressWarnings({"unchecked"})
  private static Map<String, Object> aclStatusToJSON(AclStatus aclStatus) {
    Map<String, Object> json = new LinkedHashMap<>();
    Map<String, Object> inner = new LinkedHashMap<>();
JSONArray entriesArray = new JSONArray();
inner.put(HttpFSFileSystem.OWNER_JSON, aclStatus.getOwner());
inner.put(HttpFSFileSystem.GROUP_JSON, aclStatus.getGroup());
inner.put(HttpFSFileSystem.PERMISSION_JSON,
HttpFSFileSystem.permissionToString(aclStatus.getPermission()));
inner.put(HttpFSFileSystem.ACL_STICKY_BIT_JSON, aclStatus.isStickyBit());
    for (AclEntry e : aclStatus.getEntries()) {
entriesArray.add(e.toString());
}
inner.put(HttpFSFileSystem.ACL_ENTRIES_JSON, entriesArray);
json.put(HttpFSFileSystem.ACL_STATUS_JSON, inner);
return json;
}
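  // Illustrative shape of the result, assuming the WebHDFS key names:
  //
  //   {"AclStatus": {"owner": "hdfs", "group": "supergroup",
  //    "permission": "775", "stickyBit": false,
  //    "entries": ["user:joe:rwx", "group::r-x"]}}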
/**
 * Converts a FileChecksum object into a JSON object.
*
* @param checksum file checksum.
*
* @return The JSON representation of the file checksum.
*/
@SuppressWarnings({"unchecked"})
  private static Map<String, Object> fileChecksumToJSON(FileChecksum checksum) {
    Map<String, Object> json = new LinkedHashMap<>();
    json.put(HttpFSFileSystem.CHECKSUM_ALGORITHM_JSON,
        checksum.getAlgorithmName());
    json.put(HttpFSFileSystem.CHECKSUM_BYTES_JSON,
        StringUtils.byteToHexString(checksum.getBytes()));
    json.put(HttpFSFileSystem.CHECKSUM_LENGTH_JSON, checksum.getLength());
    Map<String, Object> response = new LinkedHashMap<>();
response.put(HttpFSFileSystem.FILE_CHECKSUM_JSON, json);
return response;
}
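  // Illustrative shape of the result, assuming the WebHDFS key names:
  //
  //   {"FileChecksum": {"algorithm": "MD5-of-0MD5-of-512CRC32C",
  //    "bytes": "000002000000000000000000...", "length": 28}}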
/**
* Converts xAttrs to a JSON object.
*
* @param xAttrs file xAttrs.
* @param encoding format of xattr values.
*
* @return The JSON representation of the xAttrs.
 * @throws IOException thrown if the xAttr values could not be encoded.
*/
@SuppressWarnings({"unchecked", "rawtypes"})
private static Map xAttrsToJSON(Map xAttrs,
XAttrCodec encoding) throws IOException {
Map jsonMap = new LinkedHashMap();
JSONArray jsonArray = new JSONArray();
if (xAttrs != null) {
for (Entry e : xAttrs.entrySet()) {
Map json = new LinkedHashMap();
json.put(HttpFSFileSystem.XATTR_NAME_JSON, e.getKey());
if (e.getValue() != null) {
json.put(HttpFSFileSystem.XATTR_VALUE_JSON,
XAttrCodec.encodeValue(e.getValue(), encoding));
}
jsonArray.add(json);
}
}
jsonMap.put(HttpFSFileSystem.XATTRS_JSON, jsonArray);
return jsonMap;
}
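  // Illustrative shape of the result, assuming the WebHDFS key names and the
  // HEX value encoding:
  //
  //   {"XAttrs": [{"name": "user.a1", "value": "0x313233"}]}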
/**
* Converts xAttr names to a JSON object.
*
* @param names file xAttr names.
*
* @return The JSON representation of the xAttr names.
* @throws IOException
*/
@SuppressWarnings({"unchecked", "rawtypes"})
private static Map xAttrNamesToJSON(List names) throws IOException {
Map jsonMap = new LinkedHashMap();
jsonMap.put(HttpFSFileSystem.XATTRNAMES_JSON, JSONArray.toJSONString(names));
return jsonMap;
}
/**
 * Converts a ContentSummary object into a JSON object.
*
* @param contentSummary the content summary
*
* @return The JSON representation of the content summary.
*/
@SuppressWarnings({"unchecked"})
  private static Map<String, Object> contentSummaryToJSON(
      ContentSummary contentSummary) {
    Map<String, Object> json = new LinkedHashMap<>();
json.put(HttpFSFileSystem.CONTENT_SUMMARY_DIRECTORY_COUNT_JSON,
contentSummary.getDirectoryCount());
json.put(HttpFSFileSystem.CONTENT_SUMMARY_ECPOLICY_JSON,
contentSummary.getErasureCodingPolicy());
json.put(HttpFSFileSystem.CONTENT_SUMMARY_FILE_COUNT_JSON,
contentSummary.getFileCount());
json.put(HttpFSFileSystem.CONTENT_SUMMARY_LENGTH_JSON,
contentSummary.getLength());
    Map<String, Object> quotaUsageMap = quotaUsageToMap(contentSummary);
    for (Map.Entry<String, Object> e : quotaUsageMap.entrySet()) {
// For ContentSummary we don't need this since we already have
// separate count for file and directory.
if (!e.getKey().equals(
HttpFSFileSystem.QUOTA_USAGE_FILE_AND_DIRECTORY_COUNT_JSON)) {
json.put(e.getKey(), e.getValue());
}
}
    Map<String, Object> response = new LinkedHashMap<>();
response.put(HttpFSFileSystem.CONTENT_SUMMARY_JSON, json);
return response;
}
/**
 * Converts a QuotaUsage object into a JSON object.
*/
@SuppressWarnings({"unchecked"})
  private static Map<String, Object> quotaUsageToJSON(QuotaUsage quotaUsage) {
    Map<String, Object> response = new LinkedHashMap<>();
    Map<String, Object> quotaUsageMap = quotaUsageToMap(quotaUsage);
response.put(HttpFSFileSystem.QUOTA_USAGE_JSON, quotaUsageMap);
return response;
}
  private static Map<String, Object> quotaUsageToMap(QuotaUsage quotaUsage) {
    Map<String, Object> result = new LinkedHashMap<>();
result.put(HttpFSFileSystem.QUOTA_USAGE_FILE_AND_DIRECTORY_COUNT_JSON,
quotaUsage.getFileAndDirectoryCount());
result.put(HttpFSFileSystem.QUOTA_USAGE_QUOTA_JSON, quotaUsage.getQuota());
result.put(HttpFSFileSystem.QUOTA_USAGE_SPACE_CONSUMED_JSON,
quotaUsage.getSpaceConsumed());
result.put(HttpFSFileSystem.QUOTA_USAGE_SPACE_QUOTA_JSON,
quotaUsage.getSpaceQuota());
    Map<String, Map<String, Long>> typeQuota = new TreeMap<>();
for (StorageType t : StorageType.getTypesSupportingQuota()) {
long tQuota = quotaUsage.getTypeQuota(t);
if (tQuota != HdfsConstants.QUOTA_RESET) {
        Map<String, Long> type = typeQuota.get(t.toString());
        if (type == null) {
          type = new TreeMap<>();
typeQuota.put(t.toString(), type);
}
type.put(HttpFSFileSystem.QUOTA_USAGE_QUOTA_JSON,
quotaUsage.getTypeQuota(t));
type.put(HttpFSFileSystem.QUOTA_USAGE_CONSUMED_JSON,
quotaUsage.getTypeConsumed(t));
}
}
result.put(HttpFSFileSystem.QUOTA_USAGE_TYPE_QUOTA_JSON, typeQuota);
return result;
}
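  // Illustrative shape of the per-type nesting built above: only storage
  // types with a quota set get a {consumed, quota} entry, e.g.
  //
  //   "typeQuota": {"DISK": {"consumed": 1024, "quota": 10737418240},
  //                 "SSD":  {"consumed": 0,    "quota": 1073741824}}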
/**
 * Converts an object into a JSON object with one key-value entry.
*
* It assumes the given value is either a JSON primitive type or a
* JsonAware instance.
*
* @param name name for the key of the entry.
 * @param value value of the entry.
*
* @return the JSON representation of the key-value pair.
*/
@SuppressWarnings("unchecked")
private static JSONObject toJSON(String name, Object value) {
JSONObject json = new JSONObject();
json.put(name, value);
return json;
}
@SuppressWarnings({ "unchecked" })
private static JSONObject storagePolicyToJSON(BlockStoragePolicySpi policy) {
BlockStoragePolicy p = (BlockStoragePolicy) policy;
JSONObject policyJson = new JSONObject();
policyJson.put("id", p.getId());
policyJson.put("name", p.getName());
policyJson.put("storageTypes", toJsonArray(p.getStorageTypes()));
policyJson.put("creationFallbacks", toJsonArray(p.getCreationFallbacks()));
policyJson.put("replicationFallbacks",
toJsonArray(p.getReplicationFallbacks()));
policyJson.put("copyOnCreateFile", p.isCopyOnCreateFile());
return policyJson;
}
@SuppressWarnings("unchecked")
private static JSONArray toJsonArray(StorageType[] storageTypes) {
JSONArray jsonArray = new JSONArray();
for (StorageType type : storageTypes) {
jsonArray.add(type.toString());
}
return jsonArray;
}
@SuppressWarnings("unchecked")
private static JSONObject storagePoliciesToJSON(
      Collection<? extends BlockStoragePolicySpi> storagePolicies) {
JSONObject json = new JSONObject();
JSONArray jsonArray = new JSONArray();
JSONObject policies = new JSONObject();
if (storagePolicies != null) {
for (BlockStoragePolicySpi policy : storagePolicies) {
JSONObject policyMap = storagePolicyToJSON(policy);
jsonArray.add(policyMap);
}
}
policies.put(HttpFSFileSystem.STORAGE_POLICY_JSON, jsonArray);
json.put(HttpFSFileSystem.STORAGE_POLICIES_JSON, policies);
return json;
}
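  // Illustrative shape of the result, assuming the WebHDFS key names and the
  // built-in HOT policy (id and fallbacks shown are examples):
  //
  //   {"BlockStoragePolicies": {"BlockStoragePolicy": [
  //      {"id": 7, "name": "HOT", "storageTypes": ["DISK"],
  //       "creationFallbacks": [], "replicationFallbacks": ["ARCHIVE"],
  //       "copyOnCreateFile": false}, ... ]}}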
/**
 * Converts a FsStatus object into a JSON object.
*
* @param fsStatus a FsStatus object
* @return JSON map suitable for wire transport
*/
@SuppressWarnings("unchecked")
  private static Map<String, Object> toJson(FsStatus fsStatus) {
    Map<String, Object> json = new LinkedHashMap<>();
JSONObject statusJson = new JSONObject();
statusJson.put(HttpFSFileSystem.USED_JSON, fsStatus.getUsed());
statusJson.put(HttpFSFileSystem.REMAINING_JSON, fsStatus.getRemaining());
statusJson.put(HttpFSFileSystem.CAPACITY_JSON, fsStatus.getCapacity());
json.put(HttpFSFileSystem.FS_STATUS_JSON, statusJson);
return json;
}
/**
 * Executor that performs an append FileSystemAccess file system operation.
*/
@InterfaceAudience.Private
  public static class FSAppend
      implements FileSystemAccess.FileSystemExecutor<Void> {
private InputStream is;
private Path path;
/**
* Creates an Append executor.
*
* @param is input stream to append.
* @param path path of the file to append.
*/
public FSAppend(InputStream is, String path) {
this.is = is;
this.path = new Path(path);
}
/**
* Executes the filesystem operation.
*
* @param fs filesystem instance to use.
*
* @return void.
*
* @throws IOException thrown if an IO error occurred.
*/
@Override
public Void execute(FileSystem fs) throws IOException {
OutputStream os = fs.append(path, bufferSize);
long bytes = copyBytes(is, os);
HttpFSServerWebApp.get().getMetrics().incrBytesWritten(bytes);
return null;
}
}
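  // Illustrative usage (a sketch, not from this file): HttpFSServer runs
  // executors like FSAppend through the FileSystemAccess service, which
  // executes them on a FileSystem obtained for the authenticated user:
  //
  //   FSOperations.FSAppend command = new FSOperations.FSAppend(is, path);
  //   fsAccess.execute(user, conf, command);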
/**
 * Executor that performs a concat FileSystemAccess file system operation.
*/
@InterfaceAudience.Private
  public static class FSConcat
      implements FileSystemAccess.FileSystemExecutor<Void> {
private Path path;
private Path[] sources;
/**
* Creates a Concat executor.
*
* @param path target path to concat to.
     * @param sources absolute paths to use as sources (parsed from the
     *        comma-separated request parameter).
*/
public FSConcat(String path, String[] sources) {
this.sources = new Path[sources.length];
      for (int i = 0; i < sources.length; i++) {
this.sources[i] = new Path(sources[i]);
}
this.path = new Path(path);
}
/**
* Executes the filesystem operation.
*
* @param fs filesystem instance to use.
*
* @return void.
*
* @throws IOException thrown if an IO error occurred.
*/
@Override
public Void execute(FileSystem fs) throws IOException {
fs.concat(path, sources);
return null;
}
}
/**
 * Executor that performs a truncate FileSystemAccess file system operation.
*/
@InterfaceAudience.Private
  public static class FSTruncate implements
      FileSystemAccess.FileSystemExecutor<JSONObject> {
private Path path;
private long newLength;
/**
* Creates a Truncate executor.
*
     * @param path path of the file to truncate.
     * @param newLength the size the file is to be truncated to.
*/
public FSTruncate(String path, long newLength) {
this.path = new Path(path);
this.newLength = newLength;
}
/**
* Executes the filesystem operation.
*
* @param fs filesystem instance to use.
*
     * @return true if the file has been truncated to the desired length,
* false if a background process of adjusting the
* length of the last block has been started, and clients should
* wait for it to complete before proceeding with further file
* updates.
*
* @throws IOException thrown if an IO error occurred.
*/
@Override
public JSONObject execute(FileSystem fs) throws IOException {
boolean result = fs.truncate(path, newLength);
HttpFSServerWebApp.get().getMetrics().incrOpsTruncate();
return toJSON(
StringUtils.toLowerCase(HttpFSFileSystem.TRUNCATE_JSON), result);
}
}
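  // Illustrative only: under the WebHDFS convention the JSONObject returned
  // above serializes to {"boolean": true} on success (the exact key is
  // whatever HttpFSFileSystem.TRUNCATE_JSON lowercases to).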
/**
 * Executor that performs a content-summary FileSystemAccess file system
 * operation.
*/
@InterfaceAudience.Private
  public static class FSContentSummary implements
      FileSystemAccess.FileSystemExecutor<Map<String, Object>>