/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hive.ql.metadata.formatting;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.common.FileUtils;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.MetaStoreUtils;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.hadoop.hive.ql.session.SessionState;
/**
* Format table and index information for human readability using
* simple lines of text.
*/
class TextMetaDataFormatter implements MetaDataFormatter {
private static final Log LOG = LogFactory.getLog(TextMetaDataFormatter.class);

// Field separator / row terminator bytes for the plain-text output format,
// shared with the rest of the ql.exec layer (tab and newline respectively).
private static final int separator = Utilities.tabCode;
private static final int terminator = Utilities.newLineCode;

/** The number of columns to be used in pretty formatting metadata output.
 * If -1, then the current terminal width is auto-detected and used.
 */
private final int prettyOutputNumCols;

// When true, partition columns are rendered in their own section instead of
// inline with the regular columns (consumed by the formatting utilities).
private final boolean showPartColsSeparately;

/**
 * @param prettyOutputNumCols column budget for pretty output; -1 means auto-detect
 * @param partColsSeparately  whether partition columns get a separate section
 */
public TextMetaDataFormatter(int prettyOutputNumCols, boolean partColsSeparately) {
  this.prettyOutputNumCols = prettyOutputNumCols;
  this.showPartColsSeparately = partColsSeparately;
}
/**
 * Write an error message with no additional detail text.
 * Convenience overload that forwards to the five-argument variant
 * with a {@code null} errorDetail.
 *
 * @param out       stream the message is written to
 * @param msg       the error message text
 * @param errorCode numeric error code
 * @param sqlState  SQLSTATE string, may be null
 * @throws HiveException if writing to the stream fails
 */
@Override
public void error(OutputStream out, String msg, int errorCode, String sqlState)
    throws HiveException {
  error(out, msg, errorCode, sqlState, null);
}
/**
 * Write an error message, optional detail text, the error code, and the
 * optional SQLSTATE to the output stream, followed by the row terminator.
 *
 * @param out          stream the error is written to
 * @param errorMessage the error message text
 * @param errorCode    numeric error code; note that {@link OutputStream#write(int)}
 *                     emits only the low-order byte of this value — preserved
 *                     as-is from the original behavior, TODO confirm intent
 * @param sqlState     SQLSTATE string, skipped when null
 * @param errorDetail  extra detail appended after the message, skipped when null
 * @throws HiveException wrapping any failure while writing
 */
@Override
public void error(OutputStream out, String errorMessage, int errorCode, String sqlState, String errorDetail)
    throws HiveException {
  try {
    // StandardCharsets.UTF_8 produces the same bytes as getBytes("UTF-8")
    // but cannot throw the checked UnsupportedEncodingException.
    out.write(errorMessage.getBytes(StandardCharsets.UTF_8));
    if (errorDetail != null) {
      out.write(errorDetail.getBytes(StandardCharsets.UTF_8));
    }
    out.write(errorCode);
    if (sqlState != null) {
      out.write(sqlState.getBytes(StandardCharsets.UTF_8));//this breaks all the tests in .q files
    }
    out.write(terminator);
  } catch (Exception e) {
    throw new HiveException(e);
  }
}
/**
 * Show a list of tables, one table name per line.
 *
 * Restores the {@code Set<String>} element type that was stripped from this
 * copy of the file: with a raw {@code Set}, {@code iterator().next()} returns
 * {@code Object} and {@code DataOutputStream.writeBytes(String)} does not
 * compile.
 *
 * @param out    stream the table names are written to
 * @param tables table names to print, one per output row
 * @throws HiveException wrapping any I/O failure
 */
@Override
public void showTables(DataOutputStream out, Set<String> tables)
    throws HiveException {
  try {
    for (String tableName : tables) {
      // create a row per table name
      out.writeBytes(tableName);
      out.write(terminator);
    }
  } catch (IOException e) {
    throw new HiveException(e);
  }
}
/**
 * Describe a table or a column path within it in plain text.
 *
 * When {@code colPath} names the table itself, all columns (and, for a
 * partitioned table, the partition columns) are printed; otherwise only the
 * columns of the given column path plus any column statistics are printed.
 * For the whole-table case, {@code isFormatted} additionally prints the
 * table/partition details block and {@code isExt} appends the raw Thrift
 * {@code toString()} of the table or partition.
 *
 * Restores the generic element types ({@code List<FieldSchema>},
 * {@code List<ColumnStatisticsObj>}) that were stripped from this copy of the
 * file — both types are already imported above — and hoists the duplicated
 * {@code colPath.equals(tableName)} / {@code tableName.equals(colPath)} test
 * into a single local.
 *
 * @param outStream      stream the description is written to
 * @param colPath        column path being described; equals {@code tableName}
 *                       when the whole table is described
 * @param tableName      name of the table
 * @param tbl            table metadata object
 * @param part           partition metadata, or null when describing the table
 * @param cols           columns of the described entity
 * @param isFormatted    print the formatted detail section
 * @param isExt          print the extended (raw Thrift) detail section
 * @param isPretty       use terminal-width-aware pretty formatting
 * @param isOutputPadded pad columns in the formatted output
 * @param colStats       column statistics for the column-path case
 * @throws HiveException wrapping any I/O failure
 */
@Override
public void describeTable(DataOutputStream outStream, String colPath,
    String tableName, Table tbl, Partition part, List<FieldSchema> cols,
    boolean isFormatted, boolean isExt, boolean isPretty,
    boolean isOutputPadded, List<ColumnStatisticsObj> colStats) throws HiveException {
  try {
    String output;
    // The original computed this equality twice (once with operands swapped);
    // compute it once and reuse it for both branches.
    final boolean describeWholeTable = colPath.equals(tableName);
    if (describeWholeTable) {
      List<FieldSchema> partCols = tbl.isPartitioned() ? tbl.getPartCols() : null;
      output = isPretty ?
          MetaDataPrettyFormatUtils.getAllColumnsInformation(
              cols, partCols, prettyOutputNumCols)
          :
          MetaDataFormatUtils.getAllColumnsInformation(cols, partCols, isFormatted, isOutputPadded, showPartColsSeparately);
    } else {
      output = MetaDataFormatUtils.getAllColumnsInformation(cols, isFormatted, isOutputPadded, colStats);
    }
    outStream.write(output.getBytes(StandardCharsets.UTF_8));
    if (describeWholeTable) {
      if (isFormatted) {
        if (part != null) {
          output = MetaDataFormatUtils.getPartitionInformation(part);
        } else {
          output = MetaDataFormatUtils.getTableInformation(tbl);
        }
        outStream.write(output.getBytes(StandardCharsets.UTF_8));
      }
      // if extended desc table then show the complete details of the table
      if (isExt) {
        // add empty line
        outStream.write(terminator);
        if (part != null) {
          // show partition information
          outStream.writeBytes("Detailed Partition Information");
          outStream.write(separator);
          outStream.write(part.getTPartition().toString().getBytes(StandardCharsets.UTF_8));
          outStream.write(separator);
          // comment column is empty
          outStream.write(terminator);
        } else {
          // show table information
          outStream.writeBytes("Detailed Table Information");
          outStream.write(separator);
          outStream.write(tbl.getTTable().toString().getBytes(StandardCharsets.UTF_8));
          outStream.write(separator);
          outStream.write(terminator);
        }
      }
    }
  } catch (IOException e) {
    throw new HiveException(e);
  }
}
@Override
public void showTableStatus(DataOutputStream outStream,
Hive db,
HiveConf conf,
List