/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.hive.hcatalog.mapreduce;
import java.io.IOException;
import java.net.URI;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hive.common.FileUtils;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.MetaStoreUtils;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hadoop.hive.ql.metadata.HiveStorageHandler;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.Warehouse;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.hadoop.hive.shims.ShimLoader;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.JobStatus.State;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hive.hcatalog.common.ErrorType;
import org.apache.hive.hcatalog.common.HCatConstants;
import org.apache.hive.hcatalog.common.HCatException;
import org.apache.hive.hcatalog.common.HCatUtil;
import org.apache.hive.hcatalog.data.schema.HCatFieldSchema;
import org.apache.hive.hcatalog.data.schema.HCatSchema;
import org.apache.hive.hcatalog.data.schema.HCatSchemaUtils;
import org.apache.hive.hcatalog.har.HarOutputCommitterPostProcessor;
import org.apache.thrift.TException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Part of the FileOutput*Container classes
* See {@link FileOutputFormatContainer} for more information
*/
class FileOutputCommitterContainer extends OutputCommitterContainer {
private static final String TEMP_DIR_NAME = "_temporary";
private static final String LOGS_DIR_NAME = "_logs";
static final String DYNTEMP_DIR_NAME = "_DYN";
static final String SCRATCH_DIR_NAME = "_SCRATCH";
private static final String APPEND_SUFFIX = "_a_";
private static final Logger LOG = LoggerFactory.getLogger(FileOutputCommitterContainer.class);
private final boolean dynamicPartitioningUsed;
private boolean partitionsDiscovered;
private final boolean customDynamicLocationUsed;
private Map<String, Map<String, String>> partitionsDiscoveredByPath;
private Map<String, JobContext> contextDiscoveredByPath;
private final HiveStorageHandler cachedStorageHandler;
HarOutputCommitterPostProcessor harProcessor = new HarOutputCommitterPostProcessor();
private String ptnRootLocation = null;
private OutputJobInfo jobInfo = null;
/**
* @param context current JobContext
* @param baseCommitter OutputCommitter to contain
* @throws IOException
*/
public FileOutputCommitterContainer(JobContext context,
org.apache.hadoop.mapred.OutputCommitter baseCommitter) throws IOException {
super(context, baseCommitter);
jobInfo = HCatOutputFormat.getJobInfo(context);
dynamicPartitioningUsed = jobInfo.isDynamicPartitioningUsed();
this.partitionsDiscovered = !dynamicPartitioningUsed;
cachedStorageHandler = HCatUtil.getStorageHandler(context.getConfiguration(), jobInfo.getTableInfo().getStorerInfo());
Table table = new Table(jobInfo.getTableInfo().getTable());
if (dynamicPartitioningUsed && Boolean.valueOf((String)table.getProperty("EXTERNAL"))
&& jobInfo.getCustomDynamicPath() != null
&& jobInfo.getCustomDynamicPath().length() > 0) {
customDynamicLocationUsed = true;
} else {
customDynamicLocationUsed = false;
}
}
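// For the task-level operations below, the wrapped OutputCommitter is invoked only when
// static partitioning is in use; with dynamic partitioning, FileRecordWriterContainer
// drives the per-partition committers itself.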
@Override
public void abortTask(TaskAttemptContext context) throws IOException {
if (!dynamicPartitioningUsed) {
getBaseOutputCommitter().abortTask(HCatMapRedUtil.createTaskAttemptContext(context));
}
}
@Override
public void commitTask(TaskAttemptContext context) throws IOException {
if (!dynamicPartitioningUsed) {
//See HCATALOG-499
FileOutputFormatContainer.setWorkOutputPath(context);
getBaseOutputCommitter().commitTask(HCatMapRedUtil.createTaskAttemptContext(context));
}
}
@Override
public boolean needsTaskCommit(TaskAttemptContext context) throws IOException {
if (!dynamicPartitioningUsed) {
return getBaseOutputCommitter().needsTaskCommit(HCatMapRedUtil.createTaskAttemptContext(context));
} else {
// called explicitly through FileRecordWriterContainer.close() if dynamic - return false by default
return false;
}
}
@Override
public void setupJob(JobContext context) throws IOException {
if (getBaseOutputCommitter() != null && !dynamicPartitioningUsed) {
getBaseOutputCommitter().setupJob(HCatMapRedUtil.createJobContext(context));
}
// in the dynamic-partitioning use case, this is invoked through FileRecordWriterContainer
}
@Override
public void setupTask(TaskAttemptContext context) throws IOException {
if (!dynamicPartitioningUsed) {
getBaseOutputCommitter().setupTask(HCatMapRedUtil.createTaskAttemptContext(context));
}
}
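/**
 * Aborts the job: delegates to the wrapped committer (static partitioning) or to each
 * discovered partition's committer (dynamic partitioning), deletes the temporary output
 * or dynamic-partition scratch directory, and finally cancels any delegation tokens.
 */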
@Override
public void abortJob(JobContext jobContext, State state) throws IOException {
try {
if (dynamicPartitioningUsed) {
discoverPartitions(jobContext);
}
org.apache.hadoop.mapred.JobContext mapRedJobContext = HCatMapRedUtil
.createJobContext(jobContext);
if (getBaseOutputCommitter() != null && !dynamicPartitioningUsed) {
getBaseOutputCommitter().abortJob(mapRedJobContext, state);
} else if (dynamicPartitioningUsed) {
for (JobContext currContext : contextDiscoveredByPath.values()) {
try {
new JobConf(currContext.getConfiguration())
.getOutputCommitter().abortJob(currContext,
state);
} catch (Exception e) {
throw new IOException(e);
}
}
}
Path src;
OutputJobInfo jobInfo = HCatOutputFormat.getJobInfo(jobContext);
Path tblPath = new Path(jobInfo.getTableInfo().getTableLocation());
if (dynamicPartitioningUsed) {
if (!customDynamicLocationUsed) {
src = new Path(getPartitionRootLocation(jobInfo.getLocation(), jobInfo.getTableInfo().getTable()
.getPartitionKeysSize()));
} else {
src = new Path(getCustomPartitionRootLocation(jobInfo, jobContext.getConfiguration()));
}
} else {
src = new Path(jobInfo.getLocation());
}
FileSystem fs = src.getFileSystem(jobContext.getConfiguration());
// Note: fs.delete will fail on Windows because, in the OutputCommitter, Hadoop is
// still writing to _logs/history. On Linux the OS does not care that a file is still
// open and removes the directory anyway, but Windows refuses to remove a directory
// containing open files. So on Windows the output directory is left behind when the
// job fails, and the user has to remove it manually.
LOG.info("Job failed. Try cleaning up temporary directory [{}].", src);
if (!src.equals(tblPath)){
fs.delete(src, true);
}
} finally {
cancelDelegationTokens(jobContext);
}
}
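// Marker file and configuration key controlling whether a _SUCCESS file is written into
// the output directory on successful job commit.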
public static final String SUCCEEDED_FILE_NAME = "_SUCCESS";
static final String SUCCESSFUL_JOB_OUTPUT_DIR_MARKER =
"mapreduce.fileoutputcommitter.marksuccessfuljobs";
private static boolean getOutputDirMarking(Configuration conf) {
return conf.getBoolean(SUCCESSFUL_JOB_OUTPUT_DIR_MARKER,
false);
}
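/**
 * Commits the job: commits the wrapped or per-partition committers, registers the
 * produced partitions with the metastore, optionally writes a _SUCCESS marker, and
 * cancels delegation tokens once the commit has succeeded.
 */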
@Override
public void commitJob(JobContext jobContext) throws IOException {
if (dynamicPartitioningUsed) {
discoverPartitions(jobContext);
// Commit each partition so it gets moved out of the job work dir
for (JobContext context : contextDiscoveredByPath.values()) {
new JobConf(context.getConfiguration())
.getOutputCommitter().commitJob(context);
}
}
if (getBaseOutputCommitter() != null && !dynamicPartitioningUsed) {
getBaseOutputCommitter().commitJob(
HCatMapRedUtil.createJobContext(jobContext));
}
registerPartitions(jobContext);
// create _SUCCESS FILE if so requested.
OutputJobInfo jobInfo = HCatOutputFormat.getJobInfo(jobContext);
if (getOutputDirMarking(jobContext.getConfiguration())) {
Path outputPath = new Path(jobInfo.getLocation());
FileSystem fileSys = outputPath.getFileSystem(jobContext
.getConfiguration());
// create a file in the folder to mark it
if (fileSys.exists(outputPath)) {
Path filePath = new Path(outputPath,
SUCCEEDED_FILE_NAME);
// may already have been created by baseCommitter.commitJob()
if (!fileSys.exists(filePath)) {
fileSys.create(filePath).close();
}
}
}
// Commit has succeeded (since no exceptions have been thrown.)
// Safe to cancel delegation tokens now.
cancelDelegationTokens(jobContext);
}
@Override
public void cleanupJob(JobContext context) throws IOException {
throw new IOException("The method cleanupJob is deprecated and should not be called.");
}
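/**
 * Computes, once per job, the root of the custom dynamic-partition scratch directory:
 * the table location, optionally extended by the custom dynamic root, plus a _DYN
 * subdirectory derived from the dynamic-partition job id.
 */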
private String getCustomPartitionRootLocation(OutputJobInfo jobInfo, Configuration conf) {
if (ptnRootLocation == null) {
// we only need to calculate it once, it'll be the same for other partitions in this job.
String parentPath = jobInfo.getTableInfo().getTableLocation();
if (jobInfo.getCustomDynamicRoot() != null
&& jobInfo.getCustomDynamicRoot().length() > 0) {
parentPath = new Path(parentPath, jobInfo.getCustomDynamicRoot()).toString();
}
Path ptnRoot = new Path(parentPath, DYNTEMP_DIR_NAME +
conf.get(HCatConstants.HCAT_DYNAMIC_PTN_JOBID));
ptnRootLocation = ptnRoot.toString();
}
return ptnRootLocation;
}
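/**
 * Computes, once per job, the dynamic-partition root location by walking up one parent
 * directory per partition key from the given partition location. Returns null when a
 * custom dynamic location is in use.
 */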
private String getPartitionRootLocation(String ptnLocn, int numPtnKeys) {
if (customDynamicLocationUsed) {
return null;
}
if (ptnRootLocation == null) {
// we only need to calculate it once, it'll be the same for other partitions in this job.
Path ptnRoot = new Path(ptnLocn);
for (int i = 0; i < numPtnKeys; i++) {
// LOG.info("Getting parent of "+ptnRoot.getName());
ptnRoot = ptnRoot.getParent();
}
ptnRootLocation = ptnRoot.toString();
}
// LOG.info("Returning final parent : "+ptnRootLocation);
return ptnRootLocation;
}
/**
* Generate partition metadata object to be used to add to metadata.
* @param context The job context.
* @param jobInfo The OutputJobInfo.
* @param partLocnRoot The table-equivalent location root of the partition
* (temporary dir if dynamic partition, table dir if static)
* @param dynPartPath The path of dynamic partition which is created
* @param partKVs The key-value pairs that form the partition
* @param outputSchema The output schema for the partition
* @param params The parameters to store inside the partition
* @param table The Table metadata object under which this Partition will reside
* @param fs FileSystem object to operate on the underlying filesystem
* @param grpName Group name that owns the table dir
* @param perms FsPermission that's the default permission of the table dir.
* @return Constructed Partition metadata object
* @throws java.io.IOException
*/
private Partition constructPartition(
JobContext context, OutputJobInfo jobInfo,
String partLocnRoot, String dynPartPath, Map<String, String> partKVs,
HCatSchema outputSchema, Map<String, String> params,
Table table, FileSystem fs,
String grpName, FsPermission perms) throws IOException {
Partition partition = new Partition();
partition.setDbName(table.getDbName());
partition.setTableName(table.getTableName());
partition.setSd(new StorageDescriptor(table.getTTable().getSd()));
List<FieldSchema> fields = new ArrayList<FieldSchema>();
for (HCatFieldSchema fieldSchema : outputSchema.getFields()) {
fields.add(HCatSchemaUtils.getFieldSchema(fieldSchema));
}
partition.getSd().setCols(fields);
partition.setValues(FileOutputFormatContainer.getPartitionValueList(table, partKVs));
partition.setParameters(params);
// Sets permissions and group name on partition dirs and files.
Path partPath;
if (customDynamicLocationUsed) {
partPath = new Path(dynPartPath);
} else if (!dynamicPartitioningUsed
&& Boolean.valueOf((String)table.getProperty("EXTERNAL"))
&& jobInfo.getLocation() != null && jobInfo.getLocation().length() > 0) {
// Now, we need to de-scratchify this location - i.e., get rid of any
// _SCRATCH[\d].?[\d]+ from the location.
String jobLocation = jobInfo.getLocation();
String finalLocn = jobLocation.replaceAll(Path.SEPARATOR + SCRATCH_DIR_NAME + "\\d\\.?\\d+","");
partPath = new Path(finalLocn);
} else {
partPath = new Path(partLocnRoot);
int i = 0;
for (FieldSchema partKey : table.getPartitionKeys()) {
if (i++ != 0) {
fs.mkdirs(partPath); // Attempt to make the path in case it does not exist before we check
applyGroupAndPerms(fs, partPath, perms, grpName, false);
}
partPath = constructPartialPartPath(partPath, partKey.getName().toLowerCase(), partKVs);
}
}
// Apply the group and permissions to the leaf partition and files.
// Need not bother in case of HDFS as permission is taken care of by setting UMask
fs.mkdirs(partPath); // Attempt to make the path in case it does not exist before we check
if (!ShimLoader.getHadoopShims().getHCatShim().isFileInHDFS(fs, partPath)) {
applyGroupAndPerms(fs, partPath, perms, grpName, true);
}
// Set the location in the StorageDescriptor
if (dynamicPartitioningUsed) {
String dynamicPartitionDestination = getFinalDynamicPartitionDestination(table, partKVs, jobInfo);
if (harProcessor.isEnabled()) {
harProcessor.exec(context, partition, partPath);
partition.getSd().setLocation(
harProcessor.getProcessedLocation(new Path(dynamicPartitionDestination)));
} else {
partition.getSd().setLocation(dynamicPartitionDestination);
}
} else {
partition.getSd().setLocation(partPath.toString());
}
return partition;
}
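/**
 * Applies the given permission to a directory and, when recursive, to every file and
 * subdirectory beneath it.
 */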
private void applyGroupAndPerms(FileSystem fs, Path dir, FsPermission permission,
String group, boolean recursive)
throws IOException {
if(LOG.isDebugEnabled()) {
LOG.debug("applyGroupAndPerms : " + dir +
" perms: " + permission +
" group: " + group + " recursive: " + recursive);
}
fs.setPermission(dir, permission);
if (recursive) {
for (FileStatus fileStatus : fs.listStatus(dir)) {
if (fileStatus.isDir()) {
applyGroupAndPerms(fs, fileStatus.getPath(), permission, group, true);
} else {
fs.setPermission(fileStatus.getPath(), permission);
}
}
}
}
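/**
 * Maps a dynamic-partition scratch location to its final destination under the table
 * directory, either by appending one key=value component per partition key or, when a
 * custom dynamic location is used, by resolving the user-supplied path pattern.
 */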
private String getFinalDynamicPartitionDestination(Table table, Map<String, String> partKVs,
OutputJobInfo jobInfo) {
Path partPath = new Path(table.getTTable().getSd().getLocation());
if (!customDynamicLocationUsed) {
// file:///tmp/hcat_junit_warehouse/employee/_DYN0.7770480401313761/emp_country=IN/emp_state=KA ->
// file:///tmp/hcat_junit_warehouse/employee/emp_country=IN/emp_state=KA
for (FieldSchema partKey : table.getPartitionKeys()) {
partPath = constructPartialPartPath(partPath, partKey.getName().toLowerCase(), partKVs);
}
return partPath.toString();
} else {
// if custom root specified, update the parent path
if (jobInfo.getCustomDynamicRoot() != null
&& jobInfo.getCustomDynamicRoot().length() > 0) {
partPath = new Path(partPath, jobInfo.getCustomDynamicRoot());
}
return new Path(partPath, HCatFileUtil.resolveCustomPath(jobInfo, partKVs, false)).toString();
}
}
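/**
 * Builds the partition parameter map from the storer info, copying table-level hcat.*
 * properties into the partition.
 */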
private Map<String, String> getStorerParameterMap(StorerInfo storer) {
Map<String, String> params = new HashMap<String, String>();
//Copy table level hcat.* keys to the partition
for (Entry<Object, Object>