org.apache.hadoop.hive.ql.parse.DDLSemanticAnalyzer (hive-apache: shaded version of Apache Hive for Presto)
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hive.ql.parse;
import static org.apache.hadoop.hive.ql.parse.HiveParser.TOK_DATABASELOCATION;
import static org.apache.hadoop.hive.ql.parse.HiveParser.TOK_DATABASEPROPERTIES;
import java.io.FileNotFoundException;
import java.io.Serializable;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import org.antlr.runtime.tree.CommonTree;
import org.antlr.runtime.tree.Tree;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.common.StatsSetupConst;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.metastore.Warehouse;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.Order;
import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint;
import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint;
import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint;
import org.apache.hadoop.hive.metastore.api.SkewedInfo;
import org.apache.hadoop.hive.metastore.api.WMMapping;
import org.apache.hadoop.hive.metastore.api.WMNullablePool;
import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan;
import org.apache.hadoop.hive.metastore.api.WMPool;
import org.apache.hadoop.hive.metastore.api.WMResourcePlanStatus;
import org.apache.hadoop.hive.metastore.api.WMTrigger;
import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
import org.apache.hadoop.hive.ql.Driver;
import org.apache.hadoop.hive.ql.ErrorMsg;
import org.apache.hadoop.hive.ql.QueryState;
import org.apache.hadoop.hive.ql.exec.ArchiveUtils;
import org.apache.hadoop.hive.ql.exec.ColumnStatsUpdateTask;
import org.apache.hadoop.hive.ql.exec.DDLTask;
import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
import org.apache.hadoop.hive.ql.exec.Task;
import org.apache.hadoop.hive.ql.exec.TaskFactory;
import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.hive.ql.hooks.Entity.Type;
import org.apache.hadoop.hive.ql.hooks.ReadEntity;
import org.apache.hadoop.hive.ql.hooks.WriteEntity;
import org.apache.hadoop.hive.ql.hooks.WriteEntity.WriteType;
import org.apache.hadoop.hive.ql.io.AcidUtils;
import org.apache.hadoop.hive.ql.io.RCFileInputFormat;
import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat;
import org.apache.hadoop.hive.ql.lib.Node;
import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager;
import org.apache.hadoop.hive.ql.lockmgr.LockException;
import org.apache.hadoop.hive.ql.lockmgr.TxnManagerFactory;
import org.apache.hadoop.hive.ql.metadata.DefaultConstraint;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.HiveUtils;
import org.apache.hadoop.hive.ql.metadata.InvalidTableException;
import org.apache.hadoop.hive.ql.metadata.NotNullConstraint;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.hadoop.hive.ql.parse.authorization.AuthorizationParseUtils;
import org.apache.hadoop.hive.ql.parse.authorization.HiveAuthorizationTaskFactory;
import org.apache.hadoop.hive.ql.parse.authorization.HiveAuthorizationTaskFactoryImpl;
import org.apache.hadoop.hive.ql.plan.AbortTxnsDesc;
import org.apache.hadoop.hive.ql.plan.AddPartitionDesc;
import org.apache.hadoop.hive.ql.plan.AddPartitionDesc.OnePartitionDesc;
import org.apache.hadoop.hive.ql.plan.AlterDatabaseDesc;
import org.apache.hadoop.hive.ql.plan.AlterMaterializedViewDesc;
import org.apache.hadoop.hive.ql.plan.AlterMaterializedViewDesc.AlterMaterializedViewTypes;
import org.apache.hadoop.hive.ql.plan.AlterResourcePlanDesc;
import org.apache.hadoop.hive.ql.plan.AlterTableAlterPartDesc;
import org.apache.hadoop.hive.ql.plan.AlterTableDesc;
import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes;
import org.apache.hadoop.hive.ql.plan.DDLDesc.DDLDescWithWriteId;
import org.apache.hadoop.hive.ql.plan.AlterTableExchangePartition;
import org.apache.hadoop.hive.ql.plan.AlterTableSimpleDesc;
import org.apache.hadoop.hive.ql.plan.AlterWMTriggerDesc;
import org.apache.hadoop.hive.ql.plan.BasicStatsWork;
import org.apache.hadoop.hive.ql.plan.CacheMetadataDesc;
import org.apache.hadoop.hive.ql.plan.ColumnStatsUpdateWork;
import org.apache.hadoop.hive.ql.plan.CreateDatabaseDesc;
import org.apache.hadoop.hive.ql.plan.CreateOrAlterWMMappingDesc;
import org.apache.hadoop.hive.ql.plan.CreateOrAlterWMPoolDesc;
import org.apache.hadoop.hive.ql.plan.CreateOrDropTriggerToPoolMappingDesc;
import org.apache.hadoop.hive.ql.plan.CreateResourcePlanDesc;
import org.apache.hadoop.hive.ql.plan.CreateWMTriggerDesc;
import org.apache.hadoop.hive.ql.plan.DDLDesc;
import org.apache.hadoop.hive.ql.plan.DDLWork;
import org.apache.hadoop.hive.ql.plan.DescDatabaseDesc;
import org.apache.hadoop.hive.ql.plan.DescFunctionDesc;
import org.apache.hadoop.hive.ql.plan.DescTableDesc;
import org.apache.hadoop.hive.ql.plan.DropDatabaseDesc;
import org.apache.hadoop.hive.ql.plan.DropResourcePlanDesc;
import org.apache.hadoop.hive.ql.plan.DropTableDesc;
import org.apache.hadoop.hive.ql.plan.DropWMMappingDesc;
import org.apache.hadoop.hive.ql.plan.DropWMPoolDesc;
import org.apache.hadoop.hive.ql.plan.DropWMTriggerDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
import org.apache.hadoop.hive.ql.plan.HiveOperation;
import org.apache.hadoop.hive.ql.plan.KillQueryDesc;
import org.apache.hadoop.hive.ql.plan.ListBucketingCtx;
import org.apache.hadoop.hive.ql.plan.LoadTableDesc;
import org.apache.hadoop.hive.ql.plan.LockDatabaseDesc;
import org.apache.hadoop.hive.ql.plan.LockTableDesc;
import org.apache.hadoop.hive.ql.plan.MoveWork;
import org.apache.hadoop.hive.ql.plan.MsckDesc;
import org.apache.hadoop.hive.ql.plan.PlanUtils;
import org.apache.hadoop.hive.ql.plan.PrincipalDesc;
import org.apache.hadoop.hive.ql.plan.RenamePartitionDesc;
import org.apache.hadoop.hive.ql.plan.RoleDDLDesc;
import org.apache.hadoop.hive.ql.plan.ShowColumnsDesc;
import org.apache.hadoop.hive.ql.plan.ShowCompactionsDesc;
import org.apache.hadoop.hive.ql.plan.ShowConfDesc;
import org.apache.hadoop.hive.ql.plan.ShowCreateDatabaseDesc;
import org.apache.hadoop.hive.ql.plan.ShowCreateTableDesc;
import org.apache.hadoop.hive.ql.plan.ShowDatabasesDesc;
import org.apache.hadoop.hive.ql.plan.ShowFunctionsDesc;
import org.apache.hadoop.hive.ql.plan.ShowGrantDesc;
import org.apache.hadoop.hive.ql.plan.ShowLocksDesc;
import org.apache.hadoop.hive.ql.plan.ShowPartitionsDesc;
import org.apache.hadoop.hive.ql.plan.ShowResourcePlanDesc;
import org.apache.hadoop.hive.ql.plan.ShowTableStatusDesc;
import org.apache.hadoop.hive.ql.plan.ShowTablesDesc;
import org.apache.hadoop.hive.ql.plan.ShowTblPropertiesDesc;
import org.apache.hadoop.hive.ql.plan.ShowTxnsDesc;
import org.apache.hadoop.hive.ql.plan.StatsWork;
import org.apache.hadoop.hive.ql.plan.SwitchDatabaseDesc;
import org.apache.hadoop.hive.ql.plan.TableDesc;
import org.apache.hadoop.hive.ql.plan.TruncateTableDesc;
import org.apache.hadoop.hive.ql.plan.UnlockDatabaseDesc;
import org.apache.hadoop.hive.ql.plan.UnlockTableDesc;
import org.apache.hadoop.hive.ql.session.SessionState;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDF;
import org.apache.hadoop.hive.serde.serdeConstants;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters.Converter;
import org.apache.hadoop.hive.serde2.typeinfo.CharTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TimestampLocalTZTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
import org.apache.hadoop.hive.serde2.typeinfo.VarcharTypeInfo;
import org.apache.hadoop.mapred.InputFormat;
import org.apache.hadoop.util.StringUtils;
import io.prestosql.hive.$internal.org.slf4j.Logger;
import io.prestosql.hive.$internal.org.slf4j.LoggerFactory;
import io.prestosql.hive.$internal.com.google.common.collect.ImmutableList;
import io.prestosql.hive.$internal.com.google.common.collect.Lists;
/**
* DDLSemanticAnalyzer: semantic analyzer for DDL statements. It translates the parsed AST of
* commands such as CREATE/ALTER/DROP DATABASE, ALTER TABLE/VIEW, SHOW/DESCRIBE, LOCK/UNLOCK,
* role management, and workload-management (resource plan) statements into DDLWork tasks.
*/
public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
private static final Logger LOG = LoggerFactory.getLogger(DDLSemanticAnalyzer.class);
private static final Map<Integer, String> TokenToTypeName = new HashMap<Integer, String>();
private final Set<String> reservedPartitionValues;
private final HiveAuthorizationTaskFactory hiveAuthorizationTaskFactory;
private WriteEntity alterTableOutput;
// Equivalent to acidSinks, but for DDL operations that change data.
private DDLDesc.DDLDescWithWriteId ddlDescWithWriteId;
static {
TokenToTypeName.put(HiveParser.TOK_BOOLEAN, serdeConstants.BOOLEAN_TYPE_NAME);
TokenToTypeName.put(HiveParser.TOK_TINYINT, serdeConstants.TINYINT_TYPE_NAME);
TokenToTypeName.put(HiveParser.TOK_SMALLINT, serdeConstants.SMALLINT_TYPE_NAME);
TokenToTypeName.put(HiveParser.TOK_INT, serdeConstants.INT_TYPE_NAME);
TokenToTypeName.put(HiveParser.TOK_BIGINT, serdeConstants.BIGINT_TYPE_NAME);
TokenToTypeName.put(HiveParser.TOK_FLOAT, serdeConstants.FLOAT_TYPE_NAME);
TokenToTypeName.put(HiveParser.TOK_DOUBLE, serdeConstants.DOUBLE_TYPE_NAME);
TokenToTypeName.put(HiveParser.TOK_STRING, serdeConstants.STRING_TYPE_NAME);
TokenToTypeName.put(HiveParser.TOK_CHAR, serdeConstants.CHAR_TYPE_NAME);
TokenToTypeName.put(HiveParser.TOK_VARCHAR, serdeConstants.VARCHAR_TYPE_NAME);
TokenToTypeName.put(HiveParser.TOK_BINARY, serdeConstants.BINARY_TYPE_NAME);
TokenToTypeName.put(HiveParser.TOK_DATE, serdeConstants.DATE_TYPE_NAME);
TokenToTypeName.put(HiveParser.TOK_DATETIME, serdeConstants.DATETIME_TYPE_NAME);
TokenToTypeName.put(HiveParser.TOK_TIMESTAMP, serdeConstants.TIMESTAMP_TYPE_NAME);
TokenToTypeName.put(HiveParser.TOK_TIMESTAMPLOCALTZ, serdeConstants.TIMESTAMPLOCALTZ_TYPE_NAME);
TokenToTypeName.put(HiveParser.TOK_INTERVAL_YEAR_MONTH, serdeConstants.INTERVAL_YEAR_MONTH_TYPE_NAME);
TokenToTypeName.put(HiveParser.TOK_INTERVAL_DAY_TIME, serdeConstants.INTERVAL_DAY_TIME_TYPE_NAME);
TokenToTypeName.put(HiveParser.TOK_DECIMAL, serdeConstants.DECIMAL_TYPE_NAME);
}
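// Maps a primitive type token from the parsed AST to its serde type name; parameterized
// types return their qualified name. Illustrative examples: TOK_INT -> "int",
// TOK_VARCHAR(50) -> "varchar(50)", TOK_DECIMAL(10,2) -> "decimal(10,2)".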
public static String getTypeName(ASTNode node) throws SemanticException {
int token = node.getType();
String typeName;
// datetime type isn't currently supported
if (token == HiveParser.TOK_DATETIME) {
throw new SemanticException(ErrorMsg.UNSUPPORTED_TYPE.getMsg());
}
switch (token) {
case HiveParser.TOK_CHAR:
CharTypeInfo charTypeInfo = ParseUtils.getCharTypeInfo(node);
typeName = charTypeInfo.getQualifiedName();
break;
case HiveParser.TOK_VARCHAR:
VarcharTypeInfo varcharTypeInfo = ParseUtils.getVarcharTypeInfo(node);
typeName = varcharTypeInfo.getQualifiedName();
break;
case HiveParser.TOK_TIMESTAMPLOCALTZ:
HiveConf conf;
try {
conf = Hive.get().getConf();
} catch (HiveException e) {
throw new SemanticException(e);
}
TimestampLocalTZTypeInfo timestampLocalTZTypeInfo = TypeInfoFactory.getTimestampTZTypeInfo(
conf.getLocalTimeZone());
typeName = timestampLocalTZTypeInfo.getQualifiedName();
break;
case HiveParser.TOK_DECIMAL:
DecimalTypeInfo decTypeInfo = ParseUtils.getDecimalTypeTypeInfo(node);
typeName = decTypeInfo.getQualifiedName();
break;
default:
typeName = TokenToTypeName.get(token);
}
return typeName;
}
public DDLSemanticAnalyzer(QueryState queryState) throws SemanticException {
this(queryState, createHiveDB(queryState.getConf()));
}
public DDLSemanticAnalyzer(QueryState queryState, Hive db) throws SemanticException {
super(queryState, db);
reservedPartitionValues = new HashSet<String>();
// Partition can't have this name
reservedPartitionValues.add(HiveConf.getVar(conf, ConfVars.DEFAULTPARTITIONNAME));
reservedPartitionValues.add(HiveConf.getVar(conf, ConfVars.DEFAULT_ZOOKEEPER_PARTITION_NAME));
// Partition value can't end in this suffix
reservedPartitionValues.add(HiveConf.getVar(conf, ConfVars.METASTORE_INT_ORIGINAL));
reservedPartitionValues.add(HiveConf.getVar(conf, ConfVars.METASTORE_INT_ARCHIVED));
reservedPartitionValues.add(HiveConf.getVar(conf, ConfVars.METASTORE_INT_EXTRACTED));
hiveAuthorizationTaskFactory = createAuthorizationTaskFactory(conf, db);
}
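// Dispatches on the root token of the DDL statement and builds the corresponding DDLWork
// tasks. For example (illustrative), "ALTER TABLE t ADD COLUMNS (c int)" arrives as
// TOK_ALTERTABLE with a TOK_ALTERTABLE_ADDCOLS child, while "SHOW TABLES" arrives as
// TOK_SHOWTABLES and additionally gets a fetch task that reads the result file.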
@Override
public void analyzeInternal(ASTNode input) throws SemanticException {
ASTNode ast = input;
switch (ast.getType()) {
case HiveParser.TOK_ALTERTABLE: {
ast = (ASTNode) input.getChild(1);
String[] qualified = getQualifiedTableName((ASTNode) input.getChild(0));
// TODO CAT - for now always use the default catalog. Eventually will want to see if
// the user specified a catalog
String catName = MetaStoreUtils.getDefaultCatalog(conf);
String tableName = getDotName(qualified);
HashMap<String, String> partSpec = null;
ASTNode partSpecNode = (ASTNode)input.getChild(2);
if (partSpecNode != null) {
// We can use ALTER TABLE ... PARTITION ... RENAME to convert/normalize legacy partition
// column values. If so, we should not validate the old partition spec passed in this
// command.
if (ast.getType() == HiveParser.TOK_ALTERTABLE_RENAMEPART) {
partSpec = getPartSpec(partSpecNode);
} else {
partSpec = getValidatedPartSpec(getTable(tableName), partSpecNode, conf, false);
}
}
if (ast.getType() == HiveParser.TOK_ALTERTABLE_RENAME) {
analyzeAlterTableRename(qualified, ast, false);
} else if (ast.getType() == HiveParser.TOK_ALTERTABLE_TOUCH) {
analyzeAlterTableTouch(qualified, ast);
} else if (ast.getType() == HiveParser.TOK_ALTERTABLE_ARCHIVE) {
analyzeAlterTableArchive(qualified, ast, false);
} else if (ast.getType() == HiveParser.TOK_ALTERTABLE_UNARCHIVE) {
analyzeAlterTableArchive(qualified, ast, true);
} else if (ast.getType() == HiveParser.TOK_ALTERTABLE_ADDCOLS) {
analyzeAlterTableModifyCols(qualified, ast, partSpec, AlterTableTypes.ADDCOLS);
} else if (ast.getType() == HiveParser.TOK_ALTERTABLE_REPLACECOLS) {
analyzeAlterTableModifyCols(qualified, ast, partSpec, AlterTableTypes.REPLACECOLS);
} else if (ast.getType() == HiveParser.TOK_ALTERTABLE_RENAMECOL) {
analyzeAlterTableRenameCol(catName, qualified, ast, partSpec);
} else if (ast.getType() == HiveParser.TOK_ALTERTABLE_ADDPARTS) {
analyzeAlterTableAddParts(qualified, ast, false);
} else if (ast.getType() == HiveParser.TOK_ALTERTABLE_DROPPARTS) {
analyzeAlterTableDropParts(qualified, ast, false);
} else if (ast.getType() == HiveParser.TOK_ALTERTABLE_PARTCOLTYPE) {
analyzeAlterTablePartColType(qualified, ast);
} else if (ast.getType() == HiveParser.TOK_ALTERTABLE_PROPERTIES) {
analyzeAlterTableProps(qualified, null, ast, false, false);
} else if (ast.getType() == HiveParser.TOK_ALTERTABLE_DROPPROPERTIES) {
analyzeAlterTableProps(qualified, null, ast, false, true);
} else if (ast.getType() == HiveParser.TOK_ALTERTABLE_UPDATESTATS) {
analyzeAlterTableProps(qualified, partSpec, ast, false, false);
} else if (ast.getType() == HiveParser.TOK_ALTERTABLE_SKEWED) {
analyzeAltertableSkewedby(qualified, ast);
} else if (ast.getType() == HiveParser.TOK_ALTERTABLE_EXCHANGEPARTITION) {
analyzeExchangePartition(qualified, ast);
} else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_FILEFORMAT) {
analyzeAlterTableFileFormat(ast, tableName, partSpec);
} else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_LOCATION) {
analyzeAlterTableLocation(ast, tableName, partSpec);
} else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_MERGEFILES) {
analyzeAlterTablePartMergeFiles(ast, tableName, partSpec);
} else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_SERIALIZER) {
analyzeAlterTableSerde(ast, tableName, partSpec);
} else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_SERDEPROPERTIES) {
analyzeAlterTableSerdeProps(ast, tableName, partSpec);
} else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_RENAMEPART) {
analyzeAlterTableRenamePart(ast, tableName, partSpec);
} else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_SKEWED_LOCATION) {
analyzeAlterTableSkewedLocation(ast, tableName, partSpec);
} else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_BUCKETS) {
analyzeAlterTableBucketNum(ast, tableName, partSpec);
} else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_CLUSTER_SORT) {
analyzeAlterTableClusterSort(ast, tableName, partSpec);
} else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_COMPACT) {
analyzeAlterTableCompact(ast, tableName, partSpec);
} else if(ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_UPDATECOLSTATS){
analyzeAlterTableUpdateStats(ast, tableName, partSpec);
} else if(ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_DROPCONSTRAINT) {
analyzeAlterTableDropConstraint(ast, tableName);
} else if(ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_ADDCONSTRAINT) {
analyzeAlterTableAddConstraint(ast, tableName);
} else if(ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_UPDATECOLUMNS) {
analyzeAlterTableUpdateColumns(ast, tableName, partSpec);
} else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_OWNER) {
analyzeAlterTableOwner(ast, tableName);
}
break;
}
case HiveParser.TOK_DROPTABLE:
analyzeDropTable(ast, null);
break;
case HiveParser.TOK_TRUNCATETABLE:
analyzeTruncateTable(ast);
break;
case HiveParser.TOK_DESCTABLE:
ctx.setResFile(ctx.getLocalTmpPath());
analyzeDescribeTable(ast);
break;
case HiveParser.TOK_SHOWDATABASES:
ctx.setResFile(ctx.getLocalTmpPath());
analyzeShowDatabases(ast);
break;
case HiveParser.TOK_SHOWTABLES:
ctx.setResFile(ctx.getLocalTmpPath());
analyzeShowTables(ast);
break;
case HiveParser.TOK_SHOWCOLUMNS:
ctx.setResFile(ctx.getLocalTmpPath());
analyzeShowColumns(ast);
break;
case HiveParser.TOK_SHOW_TABLESTATUS:
ctx.setResFile(ctx.getLocalTmpPath());
analyzeShowTableStatus(ast);
break;
case HiveParser.TOK_SHOW_TBLPROPERTIES:
ctx.setResFile(ctx.getLocalTmpPath());
analyzeShowTableProperties(ast);
break;
case HiveParser.TOK_SHOWFUNCTIONS:
ctx.setResFile(ctx.getLocalTmpPath());
analyzeShowFunctions(ast);
break;
case HiveParser.TOK_SHOWLOCKS:
ctx.setResFile(ctx.getLocalTmpPath());
analyzeShowLocks(ast);
break;
case HiveParser.TOK_SHOWDBLOCKS:
ctx.setResFile(ctx.getLocalTmpPath());
analyzeShowDbLocks(ast);
break;
case HiveParser.TOK_SHOW_COMPACTIONS:
ctx.setResFile(ctx.getLocalTmpPath());
analyzeShowCompactions(ast);
break;
case HiveParser.TOK_SHOW_TRANSACTIONS:
ctx.setResFile(ctx.getLocalTmpPath());
analyzeShowTxns(ast);
break;
case HiveParser.TOK_ABORT_TRANSACTIONS:
analyzeAbortTxns(ast);
break;
case HiveParser.TOK_KILL_QUERY:
analyzeKillQuery(ast);
break;
case HiveParser.TOK_SHOWCONF:
ctx.setResFile(ctx.getLocalTmpPath());
analyzeShowConf(ast);
break;
case HiveParser.TOK_SHOWVIEWS:
ctx.setResFile(ctx.getLocalTmpPath());
analyzeShowViews(ast);
break;
case HiveParser.TOK_SHOWMATERIALIZEDVIEWS:
ctx.setResFile(ctx.getLocalTmpPath());
analyzeShowMaterializedViews(ast);
break;
case HiveParser.TOK_DESCFUNCTION:
ctx.setResFile(ctx.getLocalTmpPath());
analyzeDescFunction(ast);
break;
case HiveParser.TOK_DESCDATABASE:
ctx.setResFile(ctx.getLocalTmpPath());
analyzeDescDatabase(ast);
break;
case HiveParser.TOK_MSCK:
ctx.setResFile(ctx.getLocalTmpPath());
analyzeMetastoreCheck(ast);
break;
case HiveParser.TOK_DROPVIEW:
analyzeDropTable(ast, TableType.VIRTUAL_VIEW);
break;
case HiveParser.TOK_DROP_MATERIALIZED_VIEW:
analyzeDropTable(ast, TableType.MATERIALIZED_VIEW);
break;
case HiveParser.TOK_ALTERVIEW: {
String[] qualified = getQualifiedTableName((ASTNode) ast.getChild(0));
ast = (ASTNode) ast.getChild(1);
if (ast.getType() == HiveParser.TOK_ALTERVIEW_PROPERTIES) {
analyzeAlterTableProps(qualified, null, ast, true, false);
} else if (ast.getType() == HiveParser.TOK_ALTERVIEW_DROPPROPERTIES) {
analyzeAlterTableProps(qualified, null, ast, true, true);
} else if (ast.getType() == HiveParser.TOK_ALTERVIEW_ADDPARTS) {
analyzeAlterTableAddParts(qualified, ast, true);
} else if (ast.getType() == HiveParser.TOK_ALTERVIEW_DROPPARTS) {
analyzeAlterTableDropParts(qualified, ast, true);
} else if (ast.getType() == HiveParser.TOK_ALTERVIEW_RENAME) {
analyzeAlterTableRename(qualified, ast, true);
}
break;
}
case HiveParser.TOK_ALTER_MATERIALIZED_VIEW: {
ast = (ASTNode) input.getChild(1);
String[] qualified = getQualifiedTableName((ASTNode) input.getChild(0));
String tableName = getDotName(qualified);
if (ast.getType() == HiveParser.TOK_ALTER_MATERIALIZED_VIEW_REWRITE) {
analyzeAlterMaterializedViewRewrite(tableName, ast);
}
break;
}
case HiveParser.TOK_SHOWPARTITIONS:
ctx.setResFile(ctx.getLocalTmpPath());
analyzeShowPartitions(ast);
break;
case HiveParser.TOK_SHOW_CREATEDATABASE:
ctx.setResFile(ctx.getLocalTmpPath());
analyzeShowCreateDatabase(ast);
break;
case HiveParser.TOK_SHOW_CREATETABLE:
ctx.setResFile(ctx.getLocalTmpPath());
analyzeShowCreateTable(ast);
break;
case HiveParser.TOK_LOCKTABLE:
analyzeLockTable(ast);
break;
case HiveParser.TOK_UNLOCKTABLE:
analyzeUnlockTable(ast);
break;
case HiveParser.TOK_LOCKDB:
analyzeLockDatabase(ast);
break;
case HiveParser.TOK_UNLOCKDB:
analyzeUnlockDatabase(ast);
break;
case HiveParser.TOK_CREATEDATABASE:
analyzeCreateDatabase(ast);
break;
case HiveParser.TOK_DROPDATABASE:
analyzeDropDatabase(ast);
break;
case HiveParser.TOK_SWITCHDATABASE:
analyzeSwitchDatabase(ast);
break;
case HiveParser.TOK_ALTERDATABASE_PROPERTIES:
analyzeAlterDatabaseProperties(ast);
break;
case HiveParser.TOK_ALTERDATABASE_OWNER:
analyzeAlterDatabaseOwner(ast);
break;
case HiveParser.TOK_ALTERDATABASE_LOCATION:
analyzeAlterDatabaseLocation(ast);
break;
case HiveParser.TOK_CREATEROLE:
analyzeCreateRole(ast);
break;
case HiveParser.TOK_DROPROLE:
analyzeDropRole(ast);
break;
case HiveParser.TOK_SHOW_ROLE_GRANT:
ctx.setResFile(ctx.getLocalTmpPath());
analyzeShowRoleGrant(ast);
break;
case HiveParser.TOK_SHOW_ROLE_PRINCIPALS:
ctx.setResFile(ctx.getLocalTmpPath());
analyzeShowRolePrincipals(ast);
break;
case HiveParser.TOK_SHOW_ROLES:
ctx.setResFile(ctx.getLocalTmpPath());
analyzeShowRoles(ast);
break;
case HiveParser.TOK_GRANT_ROLE:
analyzeGrantRevokeRole(true, ast);
break;
case HiveParser.TOK_REVOKE_ROLE:
analyzeGrantRevokeRole(false, ast);
break;
case HiveParser.TOK_GRANT:
analyzeGrant(ast);
break;
case HiveParser.TOK_SHOW_GRANT:
ctx.setResFile(ctx.getLocalTmpPath());
analyzeShowGrant(ast);
break;
case HiveParser.TOK_REVOKE:
analyzeRevoke(ast);
break;
case HiveParser.TOK_SHOW_SET_ROLE:
analyzeSetShowRole(ast);
break;
case HiveParser.TOK_CACHE_METADATA:
analyzeCacheMetadata(ast);
break;
case HiveParser.TOK_CREATE_RP:
analyzeCreateResourcePlan(ast);
break;
case HiveParser.TOK_SHOW_RP:
ctx.setResFile(ctx.getLocalTmpPath());
analyzeShowResourcePlan(ast);
break;
case HiveParser.TOK_ALTER_RP:
analyzeAlterResourcePlan(ast);
break;
case HiveParser.TOK_DROP_RP:
analyzeDropResourcePlan(ast);
break;
case HiveParser.TOK_CREATE_TRIGGER:
analyzeCreateTrigger(ast);
break;
case HiveParser.TOK_ALTER_TRIGGER:
analyzeAlterTrigger(ast);
break;
case HiveParser.TOK_DROP_TRIGGER:
analyzeDropTrigger(ast);
break;
case HiveParser.TOK_CREATE_POOL:
analyzeCreatePool(ast);
break;
case HiveParser.TOK_ALTER_POOL:
analyzeAlterPool(ast);
break;
case HiveParser.TOK_DROP_POOL:
analyzeDropPool(ast);
break;
case HiveParser.TOK_CREATE_MAPPING:
analyzeCreateOrAlterMapping(ast, false);
break;
case HiveParser.TOK_ALTER_MAPPING:
analyzeCreateOrAlterMapping(ast, true);
break;
case HiveParser.TOK_DROP_MAPPING:
analyzeDropMapping(ast);
break;
default:
throw new SemanticException("Unsupported command: " + ast);
}
if (fetchTask != null && !rootTasks.isEmpty()) {
rootTasks.get(rootTasks.size() - 1).setFetchSource(true);
}
}
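// Caches table/partition metadata in the metastore; the expected statement is roughly
// "ANALYZE TABLE tbl [PARTITION (spec)] CACHE METADATA" (illustrative syntax).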
private void analyzeCacheMetadata(ASTNode ast) throws SemanticException {
Table tbl = AnalyzeCommandUtils.getTable(ast, this);
Map<String, String> partSpec = null;
CacheMetadataDesc desc;
// In 2 cases out of 3, we could pass the path and type directly to metastore...
if (AnalyzeCommandUtils.isPartitionLevelStats(ast)) {
partSpec = AnalyzeCommandUtils.getPartKeyValuePairsFromAST(tbl, ast, conf);
Partition part = getPartition(tbl, partSpec, true);
desc = new CacheMetadataDesc(tbl.getDbName(), tbl.getTableName(), part.getName());
inputs.add(new ReadEntity(part));
} else {
// Should we get all partitions for a partitioned table?
desc = new CacheMetadataDesc(tbl.getDbName(), tbl.getTableName(), tbl.isPartitioned());
inputs.add(new ReadEntity(tbl));
}
rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)));
}
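// Manual column-statistics update, e.g. (illustrative values):
//   ALTER TABLE t [PARTITION (ds='2020-01-01')] UPDATE STATISTICS FOR COLUMN c
//   SET ('numDVs'='100', 'numNulls'='0');
// The property map is handed to a ColumnStatsUpdateTask below.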
private void analyzeAlterTableUpdateStats(ASTNode ast, String tblName, Map<String, String> partSpec)
throws SemanticException {
String colName = getUnescapedName((ASTNode) ast.getChild(0));
Map<String, String> mapProp = getProps((ASTNode) (ast.getChild(1)).getChild(0));
Table tbl = getTable(tblName);
String partName = null;
if (partSpec != null) {
try {
partName = Warehouse.makePartName(partSpec, false);
} catch (MetaException e) {
throw new SemanticException("partition " + partSpec.toString()
+ " not found");
}
}
String colType = null;
List<FieldSchema> cols = tbl.getCols();
for (FieldSchema col : cols) {
if (colName.equalsIgnoreCase(col.getName())) {
colType = col.getType();
break;
}
}
if (colType == null) {
throw new SemanticException("column type not found");
}
ColumnStatsUpdateWork columnStatsUpdateWork =
new ColumnStatsUpdateWork(partName, mapProp, tbl.getDbName(), tbl.getTableName(), colName, colType);
ColumnStatsUpdateTask cStatsUpdateTask = (ColumnStatsUpdateTask) TaskFactory
.get(columnStatsUpdateWork);
rootTasks.add(cStatsUpdateTask);
}
private void analyzeSetShowRole(ASTNode ast) throws SemanticException {
switch (ast.getChildCount()) {
case 0:
ctx.setResFile(ctx.getLocalTmpPath());
rootTasks.add(hiveAuthorizationTaskFactory.createShowCurrentRoleTask(
getInputs(), getOutputs(), ctx.getResFile()));
setFetchTask(createFetchTask(RoleDDLDesc.getRoleNameSchema()));
break;
case 1:
rootTasks.add(hiveAuthorizationTaskFactory.createSetRoleTask(
BaseSemanticAnalyzer.unescapeIdentifier(ast.getChild(0).getText()),
getInputs(), getOutputs()));
break;
default:
throw new SemanticException("Internal error. ASTNode expected to have 0 or 1 child. "
+ ast.dump());
}
}
private void analyzeGrantRevokeRole(boolean grant, ASTNode ast) throws SemanticException {
Task<? extends Serializable> task;
if(grant) {
task = hiveAuthorizationTaskFactory.createGrantRoleTask(ast, getInputs(), getOutputs());
} else {
task = hiveAuthorizationTaskFactory.createRevokeRoleTask(ast, getInputs(), getOutputs());
}
if(task != null) {
rootTasks.add(task);
}
}
private void analyzeShowGrant(ASTNode ast) throws SemanticException {
Task<? extends Serializable> task = hiveAuthorizationTaskFactory.
createShowGrantTask(ast, ctx.getResFile(), getInputs(), getOutputs());
if(task != null) {
rootTasks.add(task);
setFetchTask(createFetchTask(ShowGrantDesc.getSchema()));
}
}
private void analyzeGrant(ASTNode ast) throws SemanticException {
Task<? extends Serializable> task = hiveAuthorizationTaskFactory.
createGrantTask(ast, getInputs(), getOutputs());
if(task != null) {
rootTasks.add(task);
}
}
private void analyzeRevoke(ASTNode ast) throws SemanticException {
Task<? extends Serializable> task = hiveAuthorizationTaskFactory.
createRevokeTask(ast, getInputs(), getOutputs());
if(task != null) {
rootTasks.add(task);
}
}
private void analyzeCreateRole(ASTNode ast) throws SemanticException {
Task<? extends Serializable> task = hiveAuthorizationTaskFactory.
createCreateRoleTask(ast, getInputs(), getOutputs());
if(task != null) {
rootTasks.add(task);
}
}
private void analyzeDropRole(ASTNode ast) throws SemanticException {
Task<? extends Serializable> task = hiveAuthorizationTaskFactory.
createDropRoleTask(ast, getInputs(), getOutputs());
if(task != null) {
rootTasks.add(task);
}
}
private void analyzeShowRoleGrant(ASTNode ast) throws SemanticException {
Task<? extends Serializable> task = hiveAuthorizationTaskFactory.
createShowRoleGrantTask(ast, ctx.getResFile(), getInputs(), getOutputs());
if(task != null) {
rootTasks.add(task);
setFetchTask(createFetchTask(RoleDDLDesc.getRoleShowGrantSchema()));
}
}
private void analyzeShowRolePrincipals(ASTNode ast) throws SemanticException {
Task<DDLWork> roleDDLTask = (Task<DDLWork>) hiveAuthorizationTaskFactory
.createShowRolePrincipalsTask(ast, ctx.getResFile(), getInputs(), getOutputs());
if (roleDDLTask != null) {
rootTasks.add(roleDDLTask);
setFetchTask(createFetchTask(RoleDDLDesc.getShowRolePrincipalsSchema()));
}
}
private void analyzeShowRoles(ASTNode ast) throws SemanticException {
@SuppressWarnings("unchecked")
Task<DDLWork> roleDDLTask = (Task<DDLWork>) hiveAuthorizationTaskFactory
.createShowRolesTask(ast, ctx.getResFile(), getInputs(), getOutputs());
if (roleDDLTask != null) {
rootTasks.add(roleDDLTask);
setFetchTask(createFetchTask(RoleDDLDesc.getRoleNameSchema()));
}
}
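// e.g. (illustrative): ALTER DATABASE db SET DBPROPERTIES ('owner.team'='bi');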
private void analyzeAlterDatabaseProperties(ASTNode ast) throws SemanticException {
String dbName = unescapeIdentifier(ast.getChild(0).getText());
Map<String, String> dbProps = null;
for (int i = 1; i < ast.getChildCount(); i++) {
ASTNode childNode = (ASTNode) ast.getChild(i);
switch (childNode.getToken().getType()) {
case HiveParser.TOK_DATABASEPROPERTIES:
dbProps = DDLSemanticAnalyzer.getProps((ASTNode) childNode.getChild(0));
break;
default:
throw new SemanticException("Unrecognized token in CREATE DATABASE statement");
}
}
AlterDatabaseDesc alterDesc = new AlterDatabaseDesc(dbName, dbProps, null);
addAlterDbDesc(alterDesc);
}
private void addAlterDbDesc(AlterDatabaseDesc alterDesc) throws SemanticException {
Database database = getDatabase(alterDesc.getDatabaseName());
outputs.add(new WriteEntity(database, WriteEntity.WriteType.DDL_NO_LOCK));
rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterDesc)));
}
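// e.g. (illustrative): ALTER DATABASE db SET OWNER USER some_user  (or OWNER ROLE some_role)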
private void analyzeAlterDatabaseOwner(ASTNode ast) throws SemanticException {
String dbName = getUnescapedName((ASTNode) ast.getChild(0));
PrincipalDesc principalDesc = AuthorizationParseUtils.getPrincipalDesc((ASTNode) ast
.getChild(1));
// The syntax should not allow these fields to be null, but let's verify
String nullCmdMsg = "can't be null in alter database set owner command";
if(principalDesc.getName() == null){
throw new SemanticException("Owner name " + nullCmdMsg);
}
if(principalDesc.getType() == null){
throw new SemanticException("Owner type " + nullCmdMsg);
}
AlterDatabaseDesc alterDesc = new AlterDatabaseDesc(dbName, principalDesc, null);
addAlterDbDesc(alterDesc);
}
private void analyzeAlterDatabaseLocation(ASTNode ast) throws SemanticException {
String dbName = getUnescapedName((ASTNode) ast.getChild(0));
String newLocation = unescapeSQLString(ast.getChild(1).getText());
addLocationToOutputs(newLocation);
AlterDatabaseDesc alterDesc = new AlterDatabaseDesc(dbName, newLocation);
addAlterDbDesc(alterDesc);
}
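// Exchanges a partition between two tables with identical column and partition schemas,
// roughly: ALTER TABLE target EXCHANGE PARTITION (ds='2020-01-01') WITH TABLE source;
// The partition must exist in the source table and must not exist in the target table.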
private void analyzeExchangePartition(String[] qualified, ASTNode ast) throws SemanticException {
Table destTable = getTable(qualified);
Table sourceTable = getTable(getUnescapedName((ASTNode)ast.getChild(1)));
// Get the partition specs
Map<String, String> partSpecs = getValidatedPartSpec(sourceTable, (ASTNode)ast.getChild(0), conf, false);
validatePartitionValues(partSpecs);
boolean sameColumns = MetaStoreUtils.compareFieldColumns(
destTable.getAllCols(), sourceTable.getAllCols());
boolean samePartitions = MetaStoreUtils.compareFieldColumns(
destTable.getPartitionKeys(), sourceTable.getPartitionKeys());
if (!sameColumns || !samePartitions) {
throw new SemanticException(ErrorMsg.TABLES_INCOMPATIBLE_SCHEMAS.getMsg());
}
// Exchange partition is not allowed with transactional tables.
// If only the source is transactional, the target would also see deleted rows, since no
// snapshot isolation applies to non-ACID tables.
// If only the target is transactional, the exchanged data would become visible to all
// ongoing transactions, breaking snapshot isolation.
// If both source and target are transactional, the target partition may end up with
// delta/base files whose write IDs are not valid, which can also break snapshot isolation
// for ongoing transactions.
if (AcidUtils.isTransactionalTable(sourceTable) || AcidUtils.isTransactionalTable(destTable)) {
throw new SemanticException(ErrorMsg.EXCHANGE_PARTITION_NOT_ALLOWED_WITH_TRANSACTIONAL_TABLES.getMsg());
}
// check if source partition exists
getPartitions(sourceTable, partSpecs, true);
// Verify that the partitions specified are continuous
// If a subpartition value is specified without specifying a partition's value
// then we throw an exception
int counter = isPartitionValueContinuous(sourceTable.getPartitionKeys(), partSpecs);
if (counter < 0) {
throw new SemanticException(
ErrorMsg.PARTITION_VALUE_NOT_CONTINUOUS.getMsg(partSpecs.toString()));
}
List<Partition> destPartitions = null;
try {
destPartitions = getPartitions(destTable, partSpecs, true);
} catch (SemanticException ex) {
// We expect a SemanticException to be thrown here, because this partition
// should not be present.
}
if (destPartitions != null) {
// If any destination partition is present then throw a Semantic Exception.
throw new SemanticException(ErrorMsg.PARTITION_EXISTS.getMsg(destPartitions.toString()));
}
AlterTableExchangePartition alterTableExchangePartition =
new AlterTableExchangePartition(sourceTable, destTable, partSpecs);
rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
alterTableExchangePartition)));
inputs.add(new ReadEntity(sourceTable));
outputs.add(new WriteEntity(destTable, WriteType.DDL_SHARED));
}
/**
* @param partitionKeys the list of partition keys of the table
* @param partSpecs the partition specs given by the user
* @return the number of matched leading partition keys (>= 0) when the specified values
* form a continuous prefix of the partition keys; -1 when a sub-partition value is given
* without its parent partition value (e.g. for keys (ds, hr), {ds=x} is continuous
* but {hr=y} alone is not)
*/
private int isPartitionValueContinuous(List<FieldSchema> partitionKeys,
Map<String, String> partSpecs) {
int counter = 0;
for (FieldSchema partitionKey : partitionKeys) {
if (partSpecs.containsKey(partitionKey.getName())) {
counter++;
continue;
}
return partSpecs.size() == counter ? counter : -1;
}
return counter;
}
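// Workload-management resource plans, e.g. (illustrative):
//   CREATE RESOURCE PLAN daytime WITH QUERY_PARALLELISM=4;
//   CREATE RESOURCE PLAN daytime_copy LIKE daytime;
// QUERY_PARALLELISM and LIKE are mutually exclusive here.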
private void analyzeCreateResourcePlan(ASTNode ast) throws SemanticException {
if (ast.getChildCount() == 0) {
throw new SemanticException("Expected name in CREATE RESOURCE PLAN statement");
}
String resourcePlanName = unescapeIdentifier(ast.getChild(0).getText());
Integer queryParallelism = null;
String likeName = null;
for (int i = 1; i < ast.getChildCount(); ++i) {
Tree child = ast.getChild(i);
switch (child.getType()) {
case HiveParser.TOK_QUERY_PARALLELISM:
// Note: later we may be able to set multiple things together (except LIKE).
if (queryParallelism == null && likeName == null) {
queryParallelism = Integer.parseInt(child.getChild(0).getText());
} else {
throw new SemanticException("Conflicting create arguments " + ast.toStringTree());
}
break;
case HiveParser.TOK_LIKERP:
if (queryParallelism == null && likeName == null) {
likeName = unescapeIdentifier(child.getChild(0).getText());
} else {
throw new SemanticException("Conflicting create arguments " + ast.toStringTree());
}
break;
default: throw new SemanticException("Invalid create arguments " + ast.toStringTree());
}
}
CreateResourcePlanDesc desc = new CreateResourcePlanDesc(
resourcePlanName, queryParallelism, likeName);
addServiceOutput();
rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)));
}
private void analyzeShowResourcePlan(ASTNode ast) throws SemanticException {
String rpName = null;
if (ast.getChildCount() > 0) {
rpName = unescapeIdentifier(ast.getChild(0).getText());
}
if (ast.getChildCount() > 1) {
throw new SemanticException("Invalid syntax for SHOW RESOURCE PLAN statement");
}
ShowResourcePlanDesc showResourcePlanDesc = new ShowResourcePlanDesc(rpName, ctx.getResFile());
addServiceOutput();
rootTasks.add(TaskFactory.get(
new DDLWork(getInputs(), getOutputs(), showResourcePlanDesc)));
setFetchTask(createFetchTask(showResourcePlanDesc.getSchema(rpName)));
}
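// e.g. (illustrative): ALTER RESOURCE PLAN daytime SET QUERY_PARALLELISM = 8;
//      ALTER RESOURCE PLAN daytime ACTIVATE;
// The first child is either a global ENABLE/DISABLE token or the name of the plan to alter.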
private void analyzeAlterResourcePlan(ASTNode ast) throws SemanticException {
if (ast.getChildCount() < 1) {
throw new SemanticException("Incorrect syntax");
}
Tree nameOrGlobal = ast.getChild(0);
switch (nameOrGlobal.getType()) {
case HiveParser.TOK_ENABLE:
// This command exists solely to output this message. TODO: can we do it w/o an error?
throw new SemanticException("Activate a resource plan to enable workload management");
case HiveParser.TOK_DISABLE:
WMNullableResourcePlan anyRp = new WMNullableResourcePlan();
anyRp.setStatus(WMResourcePlanStatus.ENABLED);
AlterResourcePlanDesc desc = new AlterResourcePlanDesc(
anyRp, null, false, false, true, false);
addServiceOutput();
rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)));
return;
default: // Continue to handle changes to a specific plan.
}
if (ast.getChildCount() < 2) {
throw new SemanticException("Invalid syntax for ALTER RESOURCE PLAN statement");
}
String rpName = unescapeIdentifier(ast.getChild(0).getText());
WMNullableResourcePlan resourcePlan = new WMNullableResourcePlan();
boolean isEnableActivate = false, isReplace = false;
boolean validate = false;
for (int i = 1; i < ast.getChildCount(); ++i) {
Tree child = ast.getChild(i);
switch (child.getType()) {
case HiveParser.TOK_VALIDATE:
validate = true;
break;
case HiveParser.TOK_ACTIVATE:
if (resourcePlan.getStatus() == WMResourcePlanStatus.ENABLED) {
isEnableActivate = true;
}
if (child.getChildCount() > 1) {
throw new SemanticException("Expected 0 or 1 arguments " + ast.toStringTree());
} else if (child.getChildCount() == 1) {
if (child.getChild(0).getType() != HiveParser.TOK_REPLACE) {
throw new SemanticException("Incorrect syntax " + ast.toStringTree());
}
isReplace = true;
isEnableActivate = false; // Implied.
}
resourcePlan.setStatus(WMResourcePlanStatus.ACTIVE);
break;
case HiveParser.TOK_ENABLE:
if (resourcePlan.getStatus() == WMResourcePlanStatus.ACTIVE) {
isEnableActivate = !isReplace;
} else {
resourcePlan.setStatus(WMResourcePlanStatus.ENABLED);
}
break;
case HiveParser.TOK_DISABLE:
resourcePlan.setStatus(WMResourcePlanStatus.DISABLED);
break;
case HiveParser.TOK_REPLACE:
isReplace = true;
if (child.getChildCount() > 1) {
throw new SemanticException("Expected 0 or 1 arguments " + ast.toStringTree());
} else if (child.getChildCount() == 1) {
// Replace is essentially renaming a plan to the name of an existing plan, with backup.
resourcePlan.setName(unescapeIdentifier(child.getChild(0).getText()));
} else {
resourcePlan.setStatus(WMResourcePlanStatus.ACTIVE);
}
break;
case HiveParser.TOK_QUERY_PARALLELISM: {
if (child.getChildCount() != 1) {
throw new SemanticException("Expected one argument");
}
Tree val = child.getChild(0);
resourcePlan.setIsSetQueryParallelism(true);
if (val.getType() == HiveParser.TOK_NULL) {
resourcePlan.unsetQueryParallelism();
} else {
resourcePlan.setQueryParallelism(Integer.parseInt(val.getText()));
}
break;
}
case HiveParser.TOK_DEFAULT_POOL: {
if (child.getChildCount() != 1) {
throw new SemanticException("Expected one argument");
}
Tree val = child.getChild(0);
resourcePlan.setIsSetDefaultPoolPath(true);
if (val.getType() == HiveParser.TOK_NULL) {
resourcePlan.unsetDefaultPoolPath();
} else {
resourcePlan.setDefaultPoolPath(poolPath(child.getChild(0)));
}
break;
}
case HiveParser.TOK_RENAME:
if (child.getChildCount() != 1) {
throw new SemanticException("Expected one argument");
}
resourcePlan.setName(unescapeIdentifier(child.getChild(0).getText()));
break;
default:
throw new SemanticException(
"Unexpected token in alter resource plan statement: " + child.getType());
}
}
AlterResourcePlanDesc desc = new AlterResourcePlanDesc(
resourcePlan, rpName, validate, isEnableActivate, false, isReplace);
if (validate) {
ctx.setResFile(ctx.getLocalTmpPath());
desc.setResFile(ctx.getResFile().toString());
}
addServiceOutput();
rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)));
if (validate) {
setFetchTask(createFetchTask(AlterResourcePlanDesc.getSchema()));
}
}
private void analyzeDropResourcePlan(ASTNode ast) throws SemanticException {
if (ast.getChildCount() == 0) {
throw new SemanticException("Expected name in DROP RESOURCE PLAN statement");
}
String rpName = unescapeIdentifier(ast.getChild(0).getText());
DropResourcePlanDesc desc = new DropResourcePlanDesc(rpName);
addServiceOutput();
rootTasks.add(TaskFactory.get(
new DDLWork(getInputs(), getOutputs(), desc)));
}
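// e.g. (illustrative counter and threshold):
//   CREATE TRIGGER daytime.slow_query WHEN ELAPSED_TIME > 30000 DO KILL;
// The trigger and action expressions are kept as plain strings on the WMTrigger object.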
private void analyzeCreateTrigger(ASTNode ast) throws SemanticException {
if (ast.getChildCount() != 4) {
throw new SemanticException("Invalid syntax for create trigger statement");
}
String rpName = unescapeIdentifier(ast.getChild(0).getText());
String triggerName = unescapeIdentifier(ast.getChild(1).getText());
String triggerExpression = buildTriggerExpression((ASTNode)ast.getChild(2));
String actionExpression = buildTriggerActionExpression((ASTNode)ast.getChild(3));
WMTrigger trigger = new WMTrigger(rpName, triggerName);
trigger.setTriggerExpression(triggerExpression);
trigger.setActionExpression(actionExpression);
CreateWMTriggerDesc desc = new CreateWMTriggerDesc(trigger);
addServiceOutput();
rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)));
}
private String buildTriggerExpression(ASTNode ast) throws SemanticException {
if (ast.getType() != HiveParser.TOK_TRIGGER_EXPRESSION || ast.getChildCount() == 0) {
throw new SemanticException("Invalid trigger expression.");
}
StringBuilder builder = new StringBuilder();
for (int i = 0; i < ast.getChildCount(); ++i) {
builder.append(ast.getChild(i).getText()); // Don't strip quotes.
builder.append(' ');
}
builder.deleteCharAt(builder.length() - 1);
return builder.toString();
}
private String poolPath(Tree ast) {
StringBuilder builder = new StringBuilder();
builder.append(unescapeIdentifier(ast.getText()));
for (int i = 0; i < ast.getChildCount(); ++i) {
// DOT is not affected
builder.append(unescapeIdentifier(ast.getChild(i).getText()));
}
return builder.toString();
}
private String buildTriggerActionExpression(ASTNode ast) throws SemanticException {
switch (ast.getType()) {
case HiveParser.KW_KILL:
return "KILL";
case HiveParser.KW_MOVE:
if (ast.getChildCount() != 1) {
throw new SemanticException("Invalid move to clause in trigger action.");
}
String poolPath = poolPath(ast.getChild(0));
return "MOVE TO " + poolPath;
default:
throw new SemanticException("Unknown token in action clause: " + ast.getType());
}
}
private void analyzeAlterTrigger(ASTNode ast) throws SemanticException {
if (ast.getChildCount() != 4) {
throw new SemanticException("Invalid syntax for alter trigger statement");
}
String rpName = unescapeIdentifier(ast.getChild(0).getText());
String triggerName = unescapeIdentifier(ast.getChild(1).getText());
String triggerExpression = buildTriggerExpression((ASTNode)ast.getChild(2));
String actionExpression = buildTriggerActionExpression((ASTNode)ast.getChild(3));
WMTrigger trigger = new WMTrigger(rpName, triggerName);
trigger.setTriggerExpression(triggerExpression);
trigger.setActionExpression(actionExpression);
AlterWMTriggerDesc desc = new AlterWMTriggerDesc(trigger);
addServiceOutput();
rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)));
}
private void analyzeDropTrigger(ASTNode ast) throws SemanticException {
if (ast.getChildCount() != 2) {
throw new SemanticException("Invalid syntax for drop trigger.");
}
String rpName = unescapeIdentifier(ast.getChild(0).getText());
String triggerName = unescapeIdentifier(ast.getChild(1).getText());
DropWMTriggerDesc desc = new DropWMTriggerDesc(rpName, triggerName);
addServiceOutput();
rootTasks.add(TaskFactory.get(
new DDLWork(getInputs(), getOutputs(), desc)));
}
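// e.g. (illustrative): CREATE POOL daytime.etl WITH
//   ALLOC_FRACTION=0.5, QUERY_PARALLELISM=2, SCHEDULING_POLICY='fair';
// ALLOC_FRACTION and QUERY_PARALLELISM must be specified for a new pool.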
private void analyzeCreatePool(ASTNode ast) throws SemanticException {
// TODO: allow defaults for e.g. scheduling policy.
if (ast.getChildCount() < 3) {
throw new SemanticException("Expected more arguments: " + ast.toStringTree());
}
String rpName = unescapeIdentifier(ast.getChild(0).getText());
String poolPath = poolPath(ast.getChild(1));
WMPool pool = new WMPool(rpName, poolPath);
for (int i = 2; i < ast.getChildCount(); ++i) {
Tree child = ast.getChild(i);
if (child.getChildCount() != 1) {
throw new SemanticException("Expected 1 paramter for: " + child.getText());
}
String param = child.getChild(0).getText();
switch (child.getType()) {
case HiveParser.TOK_ALLOC_FRACTION:
pool.setAllocFraction(Double.parseDouble(param));
break;
case HiveParser.TOK_QUERY_PARALLELISM:
pool.setQueryParallelism(Integer.parseInt(param));
break;
case HiveParser.TOK_SCHEDULING_POLICY:
String schedulingPolicyStr = PlanUtils.stripQuotes(param);
if (!MetaStoreUtils.isValidSchedulingPolicy(schedulingPolicyStr)) {
throw new SemanticException("Invalid scheduling policy " + schedulingPolicyStr);
}
pool.setSchedulingPolicy(schedulingPolicyStr);
break;
case HiveParser.TOK_PATH:
throw new SemanticException("Invalid parameter path in create pool");
}
}
if (!pool.isSetAllocFraction()) {
throw new SemanticException("alloc_fraction should be specified for a pool");
}
if (!pool.isSetQueryParallelism()) {
throw new SemanticException("query_parallelism should be specified for a pool");
}
CreateOrAlterWMPoolDesc desc = new CreateOrAlterWMPoolDesc(pool, poolPath, false);
addServiceOutput();
rootTasks.add(TaskFactory.get(
new DDLWork(getInputs(), getOutputs(), desc)));
}
private void analyzeAlterPool(ASTNode ast) throws SemanticException {
if (ast.getChildCount() < 3) {
throw new SemanticException("Invalid syntax for alter pool: " + ast.toStringTree());
}
String rpName = unescapeIdentifier(ast.getChild(0).getText());
Tree poolTarget = ast.getChild(1);
boolean isUnmanagedPool = false;
String poolPath = null;
if (poolTarget.getType() == HiveParser.TOK_UNMANAGED) {
isUnmanagedPool = true;
} else {
poolPath = poolPath(ast.getChild(1));
}
WMNullablePool poolChanges = null;
boolean hasTrigger = false;
for (int i = 2; i < ast.getChildCount(); ++i) {
Tree child = ast.getChild(i);
if (child.getChildCount() != 1) {
throw new SemanticException("Invalid syntax in alter pool expected parameter.");
}
Tree param = child.getChild(0);
if (child.getType() == HiveParser.TOK_ADD_TRIGGER
|| child.getType() == HiveParser.TOK_DROP_TRIGGER) {
hasTrigger = true;
boolean drop = child.getType() == HiveParser.TOK_DROP_TRIGGER;
String triggerName = unescapeIdentifier(param.getText());
rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
new CreateOrDropTriggerToPoolMappingDesc(
rpName, triggerName, poolPath, drop, isUnmanagedPool))));
} else {
if (isUnmanagedPool) {
throw new SemanticException("Cannot alter the unmanaged pool");
}
if (poolChanges == null) {
poolChanges = new WMNullablePool(rpName, null);
}
switch (child.getType()) {
case HiveParser.TOK_ALLOC_FRACTION:
poolChanges.setAllocFraction(Double.parseDouble(param.getText()));
break;
case HiveParser.TOK_QUERY_PARALLELISM:
poolChanges.setQueryParallelism(Integer.parseInt(param.getText()));
break;
case HiveParser.TOK_SCHEDULING_POLICY:
poolChanges.setIsSetSchedulingPolicy(true);
if (param.getType() != HiveParser.TOK_NULL) {
poolChanges.setSchedulingPolicy(PlanUtils.stripQuotes(param.getText()));
}
break;
case HiveParser.TOK_PATH:
poolChanges.setPoolPath(poolPath(param));
break;
default: throw new SemanticException("Incorrect alter syntax: " + child.toStringTree());
}
}
}
if (poolChanges != null || hasTrigger) {
addServiceOutput();
}
if (poolChanges != null) {
if (!poolChanges.isSetPoolPath()) {
poolChanges.setPoolPath(poolPath);
}
rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
new CreateOrAlterWMPoolDesc(poolChanges, poolPath, true))));
}
}
private void analyzeDropPool(ASTNode ast) throws SemanticException {
if (ast.getChildCount() != 2) {
throw new SemanticException("Invalid syntax for drop pool.");
}
String rpName = unescapeIdentifier(ast.getChild(0).getText());
String poolPath = poolPath(ast.getChild(1));
DropWMPoolDesc desc = new DropWMPoolDesc(rpName, poolPath);
addServiceOutput();
rootTasks.add(TaskFactory.get(
new DDLWork(getInputs(), getOutputs(), desc)));
}
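// e.g. (illustrative): CREATE USER MAPPING 'bob' IN daytime TO etl WITH ORDER 1;
//      ALTER GROUP MAPPING 'analysts' IN daytime UNMANAGED;
// An UNMANAGED destination (no pool path) routes the entity's queries outside workload management.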
private void analyzeCreateOrAlterMapping(ASTNode ast, boolean update) throws SemanticException {
if (ast.getChildCount() < 4) {
throw new SemanticException("Invalid syntax for create or alter mapping.");
}
String rpName = unescapeIdentifier(ast.getChild(0).getText());
String entityType = ast.getChild(1).getText();
String entityName = PlanUtils.stripQuotes(ast.getChild(2).getText());
WMMapping mapping = new WMMapping(rpName, entityType, entityName);
Tree dest = ast.getChild(3);
if (dest.getType() != HiveParser.TOK_UNMANAGED) {
mapping.setPoolPath(poolPath(dest));
} // Null path => unmanaged
if (ast.getChildCount() == 5) {
mapping.setOrdering(Integer.valueOf(ast.getChild(4).getText()));
}
CreateOrAlterWMMappingDesc desc = new CreateOrAlterWMMappingDesc(mapping, update);
addServiceOutput();
rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)));
}
private void analyzeDropMapping(ASTNode ast) throws SemanticException {
if (ast.getChildCount() != 3) {
throw new SemanticException("Invalid syntax for drop mapping.");
}
String rpName = unescapeIdentifier(ast.getChild(0).getText());
String entityType = ast.getChild(1).getText();
String entityName = PlanUtils.stripQuotes(ast.getChild(2).getText());
DropWMMappingDesc desc = new DropWMMappingDesc(new WMMapping(rpName, entityType, entityName));
addServiceOutput();
rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)));
}
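// e.g. (illustrative): CREATE DATABASE IF NOT EXISTS analytics
//   COMMENT 'reporting db' LOCATION '/warehouse/analytics' WITH DBPROPERTIES ('team'='bi');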
private void analyzeCreateDatabase(ASTNode ast) throws SemanticException {
String dbName = unescapeIdentifier(ast.getChild(0).getText());
boolean ifNotExists = false;
String dbComment = null;
String dbLocation = null;
Map<String, String> dbProps = null;
for (int i = 1; i < ast.getChildCount(); i++) {
ASTNode childNode = (ASTNode) ast.getChild(i);
switch (childNode.getToken().getType()) {
case HiveParser.TOK_IFNOTEXISTS:
ifNotExists = true;
break;
case HiveParser.TOK_DATABASECOMMENT:
dbComment = unescapeSQLString(childNode.getChild(0).getText());
break;
case TOK_DATABASEPROPERTIES:
dbProps = DDLSemanticAnalyzer.getProps((ASTNode) childNode.getChild(0));
break;
case TOK_DATABASELOCATION:
dbLocation = unescapeSQLString(childNode.getChild(0).getText());
addLocationToOutputs(dbLocation);
break;
default:
throw new SemanticException("Unrecognized token in CREATE DATABASE statement");
}
}
CreateDatabaseDesc createDatabaseDesc =
new CreateDatabaseDesc(dbName, dbComment, dbLocation, ifNotExists);
if (dbProps != null) {
createDatabaseDesc.setDatabaseProperties(dbProps);
}
Database database = new Database(dbName, dbComment, dbLocation, dbProps);
outputs.add(new WriteEntity(database, WriteEntity.WriteType.DDL_NO_LOCK));
rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
createDatabaseDesc)));
}
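// e.g. (illustrative): DROP DATABASE IF EXISTS analytics CASCADE;
// With CASCADE, the contained tables are added to the outputs so the drop is authorized
// against them as well.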
private void analyzeDropDatabase(ASTNode ast) throws SemanticException {
String dbName = unescapeIdentifier(ast.getChild(0).getText());
boolean ifExists = false;
boolean ifCascade = false;
if (null != ast.getFirstChildWithType(HiveParser.TOK_IFEXISTS)) {
ifExists = true;
}
if (null != ast.getFirstChildWithType(HiveParser.TOK_CASCADE)) {
ifCascade = true;
}
Database database = getDatabase(dbName, !ifExists);
if (database == null) {
return;
}
// if cascade=true, then we need to authorize the drop table action as well
if (ifCascade) {
// add the tables as well to outputs
List<String> tableNames;
// get names of all tables under this dbName
try {
tableNames = db.getAllTables(dbName);
} catch (HiveException e) {
throw new SemanticException(e);
}
// add tables to outputs
if (tableNames != null) {
for (String tableName : tableNames) {
Table table = getTable(dbName, tableName, true);
// We want no lock here, as the database lock will cover the tables,
// and putting a lock will actually cause us to deadlock on ourselves.
outputs.add(new WriteEntity(table, WriteEntity.WriteType.DDL_NO_LOCK));
}
}
}
inputs.add(new ReadEntity(database));
outputs.add(new WriteEntity(database, WriteEntity.WriteType.DDL_EXCLUSIVE));
DropDatabaseDesc dropDatabaseDesc = new DropDatabaseDesc(dbName, ifExists, ifCascade,
new ReplicationSpec());
rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), dropDatabaseDesc)));
}
private void analyzeSwitchDatabase(ASTNode ast) throws SemanticException {
String dbName = unescapeIdentifier(ast.getChild(0).getText());
Database database = getDatabase(dbName, true);
ReadEntity dbReadEntity = new ReadEntity(database);
dbReadEntity.noLockNeeded();
inputs.add(dbReadEntity);
SwitchDatabaseDesc switchDatabaseDesc = new SwitchDatabaseDesc(dbName);
rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
switchDatabaseDesc)));
}
private void analyzeDropTable(ASTNode ast, TableType expectedType)
throws SemanticException {
String tableName = getUnescapedName((ASTNode) ast.getChild(0));
boolean ifExists = (ast.getFirstChildWithType(HiveParser.TOK_IFEXISTS) != null);
// we want to signal an error if the table/view doesn't exist and we're
// configured not to fail silently
boolean throwException =
!ifExists && !HiveConf.getBoolVar(conf, ConfVars.DROPIGNORESNONEXISTENT);
ReplicationSpec replicationSpec = new ReplicationSpec(ast);
Table tab = getTable(tableName, throwException);
if (tab != null) {
inputs.add(new ReadEntity(tab));
outputs.add(new WriteEntity(tab, WriteEntity.WriteType.DDL_EXCLUSIVE));
}
boolean ifPurge = (ast.getFirstChildWithType(HiveParser.KW_PURGE) != null);
DropTableDesc dropTblDesc = new DropTableDesc(tableName, expectedType, ifExists, ifPurge, replicationSpec);
rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
dropTblDesc)));
}
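// e.g. (illustrative): TRUNCATE TABLE t PARTITION (ds='2020-01-01');
// The column form, TRUNCATE TABLE t COLUMNS (c1, c2), is handled further down and is only
// supported for RCFile-based, non-archived tables/partitions.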
private void analyzeTruncateTable(ASTNode ast) throws SemanticException {
ASTNode root = (ASTNode) ast.getChild(0); // TOK_TABLE_PARTITION
String tableName = getUnescapedName((ASTNode) root.getChild(0));
Table table = getTable(tableName, true);
if (table.getTableType() != TableType.MANAGED_TABLE) {
throw new SemanticException(ErrorMsg.TRUNCATE_FOR_NON_MANAGED_TABLE.format(tableName));
}
if (table.isNonNative()) {
throw new SemanticException(ErrorMsg.TRUNCATE_FOR_NON_NATIVE_TABLE.format(tableName)); //TODO
}
if (!table.isPartitioned() && root.getChildCount() > 1) {
throw new SemanticException(ErrorMsg.PARTSPEC_FOR_NON_PARTITIONED_TABLE.format(tableName));
}
Map<String, String> partSpec = getPartSpec((ASTNode) root.getChild(1));
if (partSpec == null) {
if (!table.isPartitioned()) {
outputs.add(new WriteEntity(table, WriteEntity.WriteType.DDL_EXCLUSIVE));
} else {
for (Partition partition : getPartitions(table, null, false)) {
outputs.add(new WriteEntity(partition, WriteEntity.WriteType.DDL_EXCLUSIVE));
}
}
} else {
if (isFullSpec(table, partSpec)) {
validatePartSpec(table, partSpec, (ASTNode) root.getChild(1), conf, true);
Partition partition = getPartition(table, partSpec, true);
outputs.add(new WriteEntity(partition, WriteEntity.WriteType.DDL_EXCLUSIVE));
} else {
validatePartSpec(table, partSpec, (ASTNode) root.getChild(1), conf, false);
for (Partition partition : getPartitions(table, partSpec, false)) {
outputs.add(new WriteEntity(partition, WriteEntity.WriteType.DDL_EXCLUSIVE));
}
}
}
TruncateTableDesc truncateTblDesc = new TruncateTableDesc(tableName, partSpec, null);
DDLWork ddlWork = new DDLWork(getInputs(), getOutputs(), truncateTblDesc);
Task<? extends Serializable> truncateTask = TaskFactory.get(ddlWork);
// Is this a truncate column command
List<String> columnNames = null;
if (ast.getChildCount() == 2) {
try {
columnNames = getColumnNames((ASTNode)ast.getChild(1));
// It would be possible to support this, but this is such a pointless command.
if (AcidUtils.isInsertOnlyTable(table.getParameters())) {
throw new SemanticException("Truncating MM table columns not presently supported");
}
List<String> bucketCols = null;
Class<? extends InputFormat> inputFormatClass = null;
boolean isArchived = false;
Path newTblPartLoc = null;
Path oldTblPartLoc = null;
List<FieldSchema> cols = null;
ListBucketingCtx lbCtx = null;
boolean isListBucketed = false;
List<String> listBucketColNames = null;
if (table.isPartitioned()) {
Partition part = db.getPartition(table, partSpec, false);
Path tabPath = table.getPath();
Path partPath = part.getDataLocation();
// if the table is in a different dfs than the partition,
// replace the partition's dfs with the table's dfs.
newTblPartLoc = new Path(tabPath.toUri().getScheme(), tabPath.toUri()
.getAuthority(), partPath.toUri().getPath());
oldTblPartLoc = partPath;
cols = part.getCols();
bucketCols = part.getBucketCols();
inputFormatClass = part.getInputFormatClass();
isArchived = ArchiveUtils.isArchived(part);
lbCtx = constructListBucketingCtx(part.getSkewedColNames(), part.getSkewedColValues(),
part.getSkewedColValueLocationMaps(), part.isStoredAsSubDirectories(), conf);
isListBucketed = part.isStoredAsSubDirectories();
listBucketColNames = part.getSkewedColNames();
} else {
// input and output are the same
oldTblPartLoc = table.getPath();
newTblPartLoc = table.getPath();
cols = table.getCols();
bucketCols = table.getBucketCols();
inputFormatClass = table.getInputFormatClass();
lbCtx = constructListBucketingCtx(table.getSkewedColNames(), table.getSkewedColValues(),
table.getSkewedColValueLocationMaps(), table.isStoredAsSubDirectories(), conf);
isListBucketed = table.isStoredAsSubDirectories();
listBucketColNames = table.getSkewedColNames();
}
// throw a HiveException for non-rcfile.
if (!inputFormatClass.equals(RCFileInputFormat.class)) {
throw new SemanticException(ErrorMsg.TRUNCATE_COLUMN_NOT_RC.getMsg());
}
// throw a HiveException if the table/partition is archived
if (isArchived) {
throw new SemanticException(ErrorMsg.TRUNCATE_COLUMN_ARCHIVED.getMsg());
}
Set<Integer> columnIndexes = new HashSet<Integer>();
for (String columnName : columnNames) {
boolean found = false;
for (int columnIndex = 0; columnIndex < cols.size(); columnIndex++) {
if (columnName.equalsIgnoreCase(cols.get(columnIndex).getName())) {
columnIndexes.add(columnIndex);
found = true;
break;
}
}
// Throw an exception if the user is trying to truncate a column which doesn't exist
if (!found) {
throw new SemanticException(ErrorMsg.INVALID_COLUMN.getMsg(columnName));
}
// Throw an exception if the table/partition is bucketed on one of the columns
for (String bucketCol : bucketCols) {
if (bucketCol.equalsIgnoreCase(columnName)) {
throw new SemanticException(ErrorMsg.TRUNCATE_BUCKETED_COLUMN.getMsg(columnName));
}
}
if (isListBucketed) {
for (String listBucketCol : listBucketColNames) {
if (listBucketCol.equalsIgnoreCase(columnName)) {
throw new SemanticException(
ErrorMsg.TRUNCATE_LIST_BUCKETED_COLUMN.getMsg(columnName));
}
}
}
}
truncateTblDesc.setColumnIndexes(new ArrayList<Integer>(columnIndexes));
truncateTblDesc.setInputDir(oldTblPartLoc);
truncateTblDesc.setLbCtx(lbCtx);
addInputsOutputsAlterTable(tableName, partSpec, AlterTableTypes.TRUNCATE);
ddlWork.setNeedLock(true);
TableDesc tblDesc = Utilities.getTableDesc(table);
// Write the output to temporary directory and move it to the final location at the end
// so the operation is atomic.
Path queryTmpdir = ctx.getExternalTmpPath(newTblPartLoc);
truncateTblDesc.setOutputDir(queryTmpdir);
LoadTableDesc ltd = new LoadTableDesc(queryTmpdir, tblDesc,
partSpec == null ? new HashMap<>() : partSpec);
ltd.setLbCtx(lbCtx);
@SuppressWarnings("unchecked")
Task<MoveWork> moveTsk =
TaskFactory.get(new MoveWork(null, null, ltd, null, false));
truncateTask.addDependentTask(moveTsk);
// Recalculate the HDFS stats if auto gather stats is set
if (conf.getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) {
BasicStatsWork basicStatsWork;
if (oldTblPartLoc.equals(newTblPartLoc)) {
// If we're merging to the same location, we can avoid some metastore calls
TableSpec tablepart = new TableSpec(this.db, conf, root);
basicStatsWork = new BasicStatsWork(tablepart);
} else {
basicStatsWork = new BasicStatsWork(ltd);
}
basicStatsWork.setNoStatsAggregator(true);
basicStatsWork.setClearAggregatorStats(true);
StatsWork columnStatsWork = new StatsWork(table, basicStatsWork, conf);
Task<? extends Serializable> statTask = TaskFactory.get(columnStatsWork);
moveTsk.addDependentTask(statTask);
}
} catch (HiveException e) {
throw new SemanticException(e);
}
}
rootTasks.add(truncateTask);
}
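/**
* Returns true if the given partition spec provides a value for every partition column
* of the table, i.e. it identifies a single partition rather than a set of partitions.
*/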
public static boolean isFullSpec(Table table, Map<String, String> partSpec) {
for (FieldSchema partCol : table.getPartCols()) {
if (partSpec.get(partCol.getName()) == null) {
return false;
}
}
return true;
}
private void validateAlterTableType(Table tbl, AlterTableTypes op) throws SemanticException {
validateAlterTableType(tbl, op, false);
}
private void validateAlterTableType(Table tbl, AlterTableTypes op, boolean expectView)
throws SemanticException {
if (tbl.isView()) {
if (!expectView) {
throw new SemanticException(ErrorMsg.ALTER_COMMAND_FOR_VIEWS.getMsg());
}
switch (op) {
case ADDPARTITION:
case DROPPARTITION:
case RENAMEPARTITION:
case ADDPROPS:
case DROPPROPS:
case RENAME:
// allow this form
break;
default:
throw new SemanticException(ErrorMsg.ALTER_VIEW_DISALLOWED_OP.getMsg(op.toString()));
}
} else {
if (expectView) {
throw new SemanticException(ErrorMsg.ALTER_COMMAND_FOR_TABLES.getMsg());
}
}
if (tbl.isNonNative() && !AlterTableTypes.nonNativeTableAllowedTypes.contains(op)) {
throw new SemanticException(ErrorMsg.ALTER_TABLE_NON_NATIVE.getMsg(tbl.getTableName()));
}
}
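/**
* Returns true if the given table in the current database has any enabled NOT NULL or
* DEFAULT constraints, which would prevent converting it to an external table.
*/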
private boolean hasConstraintsEnabled(final String tblName) throws SemanticException {
NotNullConstraint nnc = null;
DefaultConstraint dc = null;
try {
// retrieve enabled NOT NULL constraint from metastore
nnc = Hive.get().getEnabledNotNullConstraints(
db.getDatabaseCurrent().getName(), tblName);
dc = Hive.get().getEnabledDefaultConstraints(
db.getDatabaseCurrent().getName(), tblName);
} catch (Exception e) {
if (e instanceof SemanticException) {
throw (SemanticException) e;
} else {
throw (new RuntimeException(e));
}
}
if ((nnc != null && !nnc.getNotNullConstraints().isEmpty())
|| (dc != null && !dc.getDefaultConstraints().isEmpty())) {
return true;
}
return false;
}
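/**
* Analyzes ALTER TABLE ... SET/UNSET TBLPROPERTIES and the UPDATE STATISTICS variants,
* e.g. {@code ALTER TABLE t SET TBLPROPERTIES ('comment'='x')}. Stats keys are validated,
* and switching a table to transactional forces an exclusive lock and a write id.
*/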
private void analyzeAlterTableProps(String[] qualified, HashMap<String, String> partSpec,
ASTNode ast, boolean expectView, boolean isUnset) throws SemanticException {
String tableName = getDotName(qualified);
HashMap<String, String> mapProp = getProps((ASTNode) (ast.getChild(0))
.getChild(0));
EnvironmentContext environmentContext = null;
// we need to check if the properties are valid, especially for stats.
// they might be changed via alter table .. update statistics or
// alter table .. set tblproperties. If the property is not row_count
// or raw_data_size, it cannot be changed through update statistics
boolean changeStatsSucceeded = false;
for (Entry<String, String> entry : mapProp.entrySet()) {
// we make sure that we do not change anything if there is anything
// wrong.
if (entry.getKey().equals(StatsSetupConst.ROW_COUNT)
|| entry.getKey().equals(StatsSetupConst.RAW_DATA_SIZE)) {
try {
Long.parseLong(entry.getValue());
changeStatsSucceeded = true;
} catch (Exception e) {
throw new SemanticException("AlterTable " + entry.getKey() + " failed with value "
+ entry.getValue());
}
}
// if the table is being modified to become external, we need to make sure the existing
// table doesn't have any enabled constraints, since constraints are disallowed on such tables
else if (entry.getKey().equals("external") && entry.getValue().equals("true")) {
if (hasConstraintsEnabled(qualified[1])) {
throw new SemanticException(
ErrorMsg.INVALID_CSTR_SYNTAX.getMsg("Table: " + tableName + " has constraints enabled. "
+ "Please remove those constraints to change this property."));
}
}
else {
if (queryState.getCommandType()
.equals(HiveOperation.ALTERTABLE_UPDATETABLESTATS.getOperationName())
|| queryState.getCommandType()
.equals(HiveOperation.ALTERTABLE_UPDATEPARTSTATS.getOperationName())) {
throw new SemanticException("AlterTable UpdateStats " + entry.getKey()
+ " failed because the only valid keys are " + StatsSetupConst.ROW_COUNT + " and "
+ StatsSetupConst.RAW_DATA_SIZE);
}
}
if (changeStatsSucceeded) {
environmentContext = new EnvironmentContext();
environmentContext.putToProperties(StatsSetupConst.STATS_GENERATED, StatsSetupConst.USER);
}
}
AlterTableDesc alterTblDesc = null;
if (isUnset) {
alterTblDesc = new AlterTableDesc(AlterTableTypes.DROPPROPS, partSpec, expectView);
if (ast.getChild(1) != null) {
alterTblDesc.setDropIfExists(true);
}
} else {
alterTblDesc = new AlterTableDesc(AlterTableTypes.ADDPROPS, partSpec, expectView);
}
alterTblDesc.setProps(mapProp);
alterTblDesc.setEnvironmentContext(environmentContext);
alterTblDesc.setOldName(tableName);
boolean isPotentialMmSwitch = AcidUtils.isTablePropertyTransactional(mapProp)
|| mapProp.containsKey(hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES);
addInputsOutputsAlterTable(tableName, partSpec, alterTblDesc, isPotentialMmSwitch);
DDLWork ddlWork = new DDLWork(getInputs(), getOutputs(), alterTblDesc);
if (isPotentialMmSwitch) {
this.ddlDescWithWriteId = alterTblDesc;
ddlWork.setNeedLock(true); // Hmm... why don't many other operations here need locks?
}
rootTasks.add(TaskFactory.get(ddlWork));
}
@Override
public DDLDescWithWriteId getAcidDdlDesc() {
return ddlDescWithWriteId;
}
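/**
* Analyzes ALTER TABLE ... SET SERDEPROPERTIES statements,
* e.g. {@code ALTER TABLE t SET SERDEPROPERTIES ('field.delim'=',')}.
*/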
private void analyzeAlterTableSerdeProps(ASTNode ast, String tableName,
HashMap<String, String> partSpec)
throws SemanticException {
HashMap<String, String> mapProp = getProps((ASTNode) (ast.getChild(0))
.getChild(0));
AlterTableDesc alterTblDesc = new AlterTableDesc(
AlterTableTypes.ADDSERDEPROPS);
alterTblDesc.setProps(mapProp);
alterTblDesc.setOldName(tableName);
alterTblDesc.setPartSpec(partSpec);
addInputsOutputsAlterTable(tableName, partSpec, alterTblDesc);
rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
alterTblDesc)));
}
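/**
* Analyzes ALTER TABLE ... SET SERDE statements, e.g.
* {@code ALTER TABLE t SET SERDE 'org.apache.hadoop.hive.serde2.OpenCSVSerde'
* WITH SERDEPROPERTIES ('separatorChar'=',')}.
*/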
private void analyzeAlterTableSerde(ASTNode ast, String tableName,
HashMap<String, String> partSpec)
throws SemanticException {
String serdeName = unescapeSQLString(ast.getChild(0).getText());
AlterTableDesc alterTblDesc = new AlterTableDesc(AlterTableTypes.ADDSERDE);
if (ast.getChildCount() > 1) {
HashMap<String, String> mapProp = getProps((ASTNode) (ast.getChild(1))
.getChild(0));
alterTblDesc.setProps(mapProp);
}
alterTblDesc.setOldName(tableName);
alterTblDesc.setSerdeName(serdeName);
alterTblDesc.setPartSpec(partSpec);
addInputsOutputsAlterTable(tableName, partSpec, alterTblDesc);
rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
alterTblDesc)));
}
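/**
* Analyzes ALTER TABLE ... SET FILEFORMAT statements,
* e.g. {@code ALTER TABLE t PARTITION (ds='2020-01-01') SET FILEFORMAT ORC}.
*/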
private void analyzeAlterTableFileFormat(ASTNode ast, String tableName,
HashMap<String, String> partSpec)
throws SemanticException {
StorageFormat format = new StorageFormat(conf);
ASTNode child = (ASTNode) ast.getChild(0);
if (!format.fillStorageFormat(child)) {
throw new AssertionError("Unknown token " + child.getText());
}
AlterTableDesc alterTblDesc = new AlterTableDesc(tableName, format.getInputFormat(),
format.getOutputFormat(), format.getSerde(), format.getStorageHandler(), partSpec);
addInputsOutputsAlterTable(tableName, partSpec, alterTblDesc);
rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
alterTblDesc)));
}
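/**
* Determines the write/lock type for an ALTER TABLE operation. Converting a non-ACID
* table to transactional requires an exclusive lock; all other cases defer to
* {@link WriteEntity#determineAlterTableWriteType}.
*/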
private WriteType determineAlterTableWriteType(Table tab, AlterTableDesc desc, AlterTableTypes op) {
boolean convertingToAcid = false;
if (desc != null && desc.getProps() != null && Boolean.parseBoolean(desc.getProps().get(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL))) {
convertingToAcid = true;
}
if (!AcidUtils.isTransactionalTable(tab) && convertingToAcid) {
// non-acid to transactional conversion (property itself) must be mutexed to prevent concurrent writes.
// See HIVE-16688 for use cases.
return WriteType.DDL_EXCLUSIVE;
}
return WriteEntity.determineAlterTableWriteType(op);
}
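/**
* Convenience overloads of {@link #addInputsOutputsAlterTable(String, Map, AlterTableDesc,
* AlterTableTypes, boolean)}, which registers the read/write entities (and therefore the
* locks) required by an ALTER TABLE operation.
*/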
private void addInputsOutputsAlterTable(String tableName, Map<String, String> partSpec,
AlterTableTypes op) throws SemanticException {
addInputsOutputsAlterTable(tableName, partSpec, null, op, false);
}
private void addInputsOutputsAlterTable(String tableName, Map<String, String> partSpec,
AlterTableDesc desc, boolean doForceExclusive) throws SemanticException {
addInputsOutputsAlterTable(tableName, partSpec, desc, desc.getOp(), doForceExclusive);
}
private void addInputsOutputsAlterTable(String tableName, Map<String, String> partSpec,
AlterTableDesc desc) throws SemanticException {
addInputsOutputsAlterTable(tableName, partSpec, desc, desc.getOp(), false);
}
private void addInputsOutputsAlterTable(String tableName, Map<String, String> partSpec,
AlterTableDesc desc, AlterTableTypes op, boolean doForceExclusive) throws SemanticException {
boolean isCascade = desc != null && desc.getIsCascade();
boolean alterPartitions = partSpec != null && !partSpec.isEmpty();
// cascade only occurs at the table level and then cascades to the partition level
if (isCascade && alterPartitions) {
throw new SemanticException(
ErrorMsg.ALTER_TABLE_PARTITION_CASCADE_NOT_SUPPORTED, op.getName());
}
Table tab = getTable(tableName, true);
// cascade only occurs with partitioned table
if (isCascade && !tab.isPartitioned()) {
throw new SemanticException(
ErrorMsg.ALTER_TABLE_NON_PARTITIONED_TABLE_CASCADE_NOT_SUPPORTED);
}
// Determine the lock type to acquire
WriteEntity.WriteType writeType = doForceExclusive
? WriteType.DDL_EXCLUSIVE : determineAlterTableWriteType(tab, desc, op);
if (!alterPartitions) {
inputs.add(new ReadEntity(tab));
alterTableOutput = new WriteEntity(tab, writeType);
outputs.add(alterTableOutput);
//do not need the lock for partitions since they are covered by the table lock
if (isCascade) {
for (Partition part : getPartitions(tab, partSpec, false)) {
outputs.add(new WriteEntity(part, WriteEntity.WriteType.DDL_NO_LOCK));
}
}
} else {
ReadEntity re = new ReadEntity(tab);
// In the case of altering a table for its partitions we don't need to lock the table
// itself, just the partitions. But the table will have a ReadEntity. So mark that
// ReadEntity as no lock.
re.noLockNeeded();
inputs.add(re);
if (isFullSpec(tab, partSpec)) {
// Fully specified partition spec
Partition part = getPartition(tab, partSpec, true);
outputs.add(new WriteEntity(part, writeType));
} else {
// Partial partition spec supplied. Make sure this is allowed.
if (!AlterTableDesc.doesAlterTableTypeSupportPartialPartitionSpec(op)) {
throw new SemanticException(
ErrorMsg.ALTER_TABLE_TYPE_PARTIAL_PARTITION_SPEC_NO_SUPPORTED, op.getName());
} else if (!conf.getBoolVar(HiveConf.ConfVars.DYNAMICPARTITIONING)) {
throw new SemanticException(ErrorMsg.DYNAMIC_PARTITION_DISABLED);
}
for (Partition part : getPartitions(tab, partSpec, true)) {
outputs.add(new WriteEntity(part, writeType));
}
}
}
if (desc != null) {
validateAlterTableType(tab, op, desc.getExpectView());
// validate unsetting of non-existent table properties
if (op == AlterTableDesc.AlterTableTypes.DROPPROPS && !desc.getIsDropIfExists()) {
Map<String, String> tableParams = tab.getTTable().getParameters();
for (String currKey : desc.getProps().keySet()) {
if (!tableParams.containsKey(currKey)) {
String errorMsg =
"The following property " + currKey +
" does not exist in " + tab.getTableName();
throw new SemanticException(
ErrorMsg.ALTER_TBL_UNSET_NON_EXIST_PROPERTY.getMsg(errorMsg));
}
}
}
}
}
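/**
* Analyzes ALTER TABLE ... SET OWNER statements,
* e.g. {@code ALTER TABLE t SET OWNER USER new_owner}. Both the principal type and
* name must be present.
*/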
private void analyzeAlterTableOwner(ASTNode ast, String tableName) throws SemanticException {
PrincipalDesc ownerPrincipal = AuthorizationParseUtils.getPrincipalDesc((ASTNode) ast.getChild(0));
if (ownerPrincipal.getType() == null) {
throw new SemanticException("Owner type can't be null in alter table set owner command");
}
if (ownerPrincipal.getName() == null) {
throw new SemanticException("Owner name can't be null in alter table set owner command");
}
AlterTableDesc alterTblDesc = new AlterTableDesc(tableName, ownerPrincipal);
rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterTblDesc), conf));
}
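/**
* Analyzes ALTER TABLE ... SET LOCATION statements,
* e.g. {@code ALTER TABLE t SET LOCATION 'hdfs://nn:8020/warehouse/t'}. The new location
* is only probed to validate the filesystem host/port; it does not have to exist yet.
*/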
private void analyzeAlterTableLocation(ASTNode ast, String tableName,
HashMap<String, String> partSpec) throws SemanticException {
String newLocation = unescapeSQLString(ast.getChild(0).getText());
try {
// To make sure host/port pair is valid, the status of the location
// does not matter
FileSystem.get(new URI(newLocation), conf).getFileStatus(new Path(newLocation));
} catch (FileNotFoundException e) {
// Only check that the host/port pair is valid; whether the file exists or not does not matter
} catch (Exception e) {
throw new SemanticException("Cannot connect to namenode, please check if host/port pair for " + newLocation + " is valid", e);
}
addLocationToOutputs(newLocation);
AlterTableDesc alterTblDesc = new AlterTableDesc(tableName, newLocation, partSpec);
addInputsOutputsAlterTable(tableName, partSpec, alterTblDesc);
rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
alterTblDesc)));
}
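/**
* Analyzes ALTER TABLE ... CONCATENATE statements, e.g.
* {@code ALTER TABLE t PARTITION (ds='2020-01-01') CONCATENATE}. Transactional tables are
* compacted instead; otherwise only non-bucketed, non-archived managed RCFile/ORC
* tables are merged through a temporary directory.
*/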
private void analyzeAlterTablePartMergeFiles(ASTNode ast,
String tableName, HashMap<String, String> partSpec)
throws SemanticException {
AlterTablePartMergeFilesDesc mergeDesc = new AlterTablePartMergeFilesDesc(
tableName, partSpec);
List<Path> inputDir = new ArrayList<Path>();
Path oldTblPartLoc = null;
Path newTblPartLoc = null;
Table tblObj = null;
ListBucketingCtx lbCtx = null;
try {
tblObj = getTable(tableName);
if (AcidUtils.isTransactionalTable(tblObj)) {
LinkedHashMap<String, String> newPartSpec = null;
if (partSpec != null) {
newPartSpec = new LinkedHashMap<>(partSpec);
}
boolean isBlocking = !HiveConf.getBoolVar(conf,
ConfVars.TRANSACTIONAL_CONCATENATE_NOBLOCK, false);
AlterTableSimpleDesc desc = new AlterTableSimpleDesc(
tableName, newPartSpec, "MAJOR", isBlocking);
rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)));
return;
}
mergeDesc.setTableDesc(Utilities.getTableDesc(tblObj));
List<String> bucketCols = null;
Class<? extends InputFormat> inputFormatClass = null;
boolean isArchived = false;
if (tblObj.isPartitioned()) {
if (partSpec == null) {
throw new SemanticException("source table " + tableName
+ " is partitioned but no partition desc found.");
} else {
Partition part = getPartition(tblObj, partSpec, false);
if (part == null) {
throw new SemanticException("source table " + tableName
+ " is partitioned but partition not found.");
}
bucketCols = part.getBucketCols();
inputFormatClass = part.getInputFormatClass();
isArchived = ArchiveUtils.isArchived(part);
Path tabPath = tblObj.getPath();
Path partPath = part.getDataLocation();
// if the table is in a different dfs than the partition,
// replace the partition's dfs with the table's dfs.
newTblPartLoc = new Path(tabPath.toUri().getScheme(), tabPath.toUri()
.getAuthority(), partPath.toUri().getPath());
oldTblPartLoc = partPath;
lbCtx = constructListBucketingCtx(part.getSkewedColNames(), part.getSkewedColValues(),
part.getSkewedColValueLocationMaps(), part.isStoredAsSubDirectories(), conf);
}
} else {
inputFormatClass = tblObj.getInputFormatClass();
bucketCols = tblObj.getBucketCols();
// input and output are the same
oldTblPartLoc = tblObj.getPath();
newTblPartLoc = tblObj.getPath();
lbCtx = constructListBucketingCtx(tblObj.getSkewedColNames(), tblObj.getSkewedColValues(),
tblObj.getSkewedColValueLocationMaps(), tblObj.isStoredAsSubDirectories(), conf);
}
// throw a HiveException for formats other than RCFile and ORC.
if (!(inputFormatClass.equals(RCFileInputFormat.class) ||
inputFormatClass.equals(OrcInputFormat.class))) {
throw new SemanticException(ErrorMsg.CONCATENATE_UNSUPPORTED_FILE_FORMAT.getMsg());
}
mergeDesc.setInputFormatClass(inputFormatClass);
// throw a HiveException if the table/partition is bucketized
if (bucketCols != null && bucketCols.size() > 0) {
throw new SemanticException(ErrorMsg.CONCATENATE_UNSUPPORTED_TABLE_BUCKETED.getMsg());
}
// throw a HiveException if the table/partition is archived
if (isArchived) {
throw new SemanticException(ErrorMsg.CONCATENATE_UNSUPPORTED_PARTITION_ARCHIVED.getMsg());
}
// non-native and non-managed tables are not supported, as MoveTask requires filenames to be
// in a specific format; violating that format can cause data loss
if (tblObj.isNonNative()) {
throw new SemanticException(ErrorMsg.CONCATENATE_UNSUPPORTED_TABLE_NON_NATIVE.getMsg());
}
if (tblObj.getTableType() != TableType.MANAGED_TABLE) {
throw new SemanticException(ErrorMsg.CONCATENATE_UNSUPPORTED_TABLE_NOT_MANAGED.getMsg());
}
inputDir.add(oldTblPartLoc);
mergeDesc.setInputDir(inputDir);
mergeDesc.setLbCtx(lbCtx);
addInputsOutputsAlterTable(tableName, partSpec, AlterTableTypes.MERGEFILES);
DDLWork ddlWork = new DDLWork(getInputs(), getOutputs(), mergeDesc);
ddlWork.setNeedLock(true);
Task<? extends Serializable> mergeTask = TaskFactory.get(ddlWork);
TableDesc tblDesc = Utilities.getTableDesc(tblObj);
Path queryTmpdir = ctx.getExternalTmpPath(newTblPartLoc);
mergeDesc.setOutputDir(queryTmpdir);
// No need to handle MM tables - unsupported path.
LoadTableDesc ltd = new LoadTableDesc(queryTmpdir, tblDesc,
partSpec == null ? new HashMap<>() : partSpec);
ltd.setLbCtx(lbCtx);
Task<MoveWork> moveTsk =
TaskFactory.get(new MoveWork(null, null, ltd, null, false));
mergeTask.addDependentTask(moveTsk);
if (conf.getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) {
BasicStatsWork basicStatsWork;
if (oldTblPartLoc.equals(newTblPartLoc)) {
// If we're merging to the same location, we can avoid some metastore calls
TableSpec tableSpec = new TableSpec(db, tableName, partSpec);
basicStatsWork = new BasicStatsWork(tableSpec);
} else {
basicStatsWork = new BasicStatsWork(ltd);
}
basicStatsWork.setNoStatsAggregator(true);
basicStatsWork.setClearAggregatorStats(true);
StatsWork columnStatsWork = new StatsWork(tblObj, basicStatsWork, conf);
Task<? extends Serializable> statTask = TaskFactory.get(columnStatsWork);
moveTsk.addDependentTask(statTask);
}
rootTasks.add(mergeTask);
} catch (Exception e) {
throw new SemanticException(e);
}
}
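/**
* Analyzes ALTER TABLE ... CLUSTERED BY / NOT CLUSTERED / NOT SORTED statements, e.g.
* {@code ALTER TABLE t CLUSTERED BY (id) SORTED BY (id ASC) INTO 8 BUCKETS}.
*/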
private void analyzeAlterTableClusterSort(ASTNode ast, String tableName,
HashMap<String, String> partSpec) throws SemanticException {
AlterTableDesc alterTblDesc;
switch (ast.getChild(0).getType()) {
case HiveParser.TOK_NOT_CLUSTERED:
alterTblDesc = new AlterTableDesc(tableName, -1, new ArrayList<String>(),
new ArrayList<Order>(), partSpec);
break;
case HiveParser.TOK_NOT_SORTED:
alterTblDesc = new AlterTableDesc(tableName, true, partSpec);
break;
case HiveParser.TOK_ALTERTABLE_BUCKETS:
ASTNode buckets = (ASTNode) ast.getChild(0);
List<String> bucketCols = getColumnNames((ASTNode) buckets.getChild(0));
List<Order> sortCols = new ArrayList<Order>();
int numBuckets = -1;
if (buckets.getChildCount() == 2) {
numBuckets = Integer.parseInt(buckets.getChild(1).getText());
} else {
sortCols = getColumnNamesOrder((ASTNode) buckets.getChild(1));
numBuckets = Integer.parseInt(buckets.getChild(2).getText());
}
if (numBuckets <= 0) {
throw new SemanticException(ErrorMsg.INVALID_BUCKET_NUMBER.getMsg());
}
alterTblDesc = new AlterTableDesc(tableName, numBuckets,
bucketCols, sortCols, partSpec);
break;
default:
throw new SemanticException("Invalid operation " + ast.getChild(0).getType());
}
addInputsOutputsAlterTable(tableName, partSpec, alterTblDesc);
rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterTblDesc)));
}
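/**
* Analyzes ALTER TABLE ... COMPACT statements, e.g.
* {@code ALTER TABLE t PARTITION (ds='2020-01-01') COMPACT 'major'}. Only 'major' and
* 'minor' compaction types are accepted.
*/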
private void analyzeAlterTableCompact(ASTNode ast, String tableName,
HashMap<String, String> partSpec) throws SemanticException {
String type = unescapeSQLString(ast.getChild(0).getText()).toLowerCase();
if (!type.equals("minor") && !type.equals("major")) {
throw new SemanticException(ErrorMsg.INVALID_COMPACTION_TYPE.getMsg());
}
LinkedHashMap<String, String> newPartSpec = null;
if (partSpec != null) {
newPartSpec = new LinkedHashMap<String, String>(partSpec);
}
HashMap<String, String> mapProp = null;
boolean isBlocking = false;
for (int i = 0; i < ast.getChildCount(); i++) {
switch (ast.getChild(i).getType()) {
case HiveParser.TOK_TABLEPROPERTIES:
mapProp = getProps((ASTNode) (ast.getChild(i)).getChild(0));
break;
case HiveParser.TOK_BLOCKING:
isBlocking = true;
break;
}
}
AlterTableSimpleDesc desc = new AlterTableSimpleDesc(
tableName, newPartSpec, type, isBlocking);
desc.setProps(mapProp);
rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)));
}
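/**
* Analyzes ALTER TABLE ... DROP CONSTRAINT statements,
* e.g. {@code ALTER TABLE t DROP CONSTRAINT pk_t}.
*/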
private void analyzeAlterTableDropConstraint(ASTNode ast, String tableName)
throws SemanticException {
String dropConstraintName = unescapeIdentifier(ast.getChild(0).getText());
AlterTableDesc alterTblDesc = new AlterTableDesc(tableName, dropConstraintName, (ReplicationSpec)null);
rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
alterTblDesc)));
}
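/**
* Analyzes ALTER TABLE ... ADD CONSTRAINT statements for PRIMARY KEY, FOREIGN KEY,
* UNIQUE and CHECK constraints, e.g.
* {@code ALTER TABLE t ADD CONSTRAINT pk_t PRIMARY KEY (id) DISABLE NOVALIDATE}.
*/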
private void analyzeAlterTableAddConstraint(ASTNode ast, String tableName)
throws SemanticException {
ASTNode parent = (ASTNode) ast.getParent();
String[] qualifiedTabName = getQualifiedTableName((ASTNode) parent.getChild(0));
// TODO CAT - for now always use the default catalog. Eventually will want to see if
// the user specified a catalog
String catName = MetaStoreUtils.getDefaultCatalog(conf);
ASTNode child = (ASTNode) ast.getChild(0);
List<SQLPrimaryKey> primaryKeys = new ArrayList<>();
List<SQLForeignKey> foreignKeys = new ArrayList<>();
List<SQLUniqueConstraint> uniqueConstraints = new ArrayList<>();
List<SQLCheckConstraint> checkConstraints = new ArrayList<>();
switch (child.getToken().getType()) {
case HiveParser.TOK_UNIQUE:
BaseSemanticAnalyzer.processUniqueConstraints(catName, qualifiedTabName[0], qualifiedTabName[1],
child, uniqueConstraints);
break;
case HiveParser.TOK_PRIMARY_KEY:
BaseSemanticAnalyzer.processPrimaryKeys(qualifiedTabName[0], qualifiedTabName[1],
child, primaryKeys);
break;
case HiveParser.TOK_FOREIGN_KEY:
BaseSemanticAnalyzer.processForeignKeys(qualifiedTabName[0], qualifiedTabName[1],
child, foreignKeys);
break;
case HiveParser.TOK_CHECK_CONSTRAINT:
BaseSemanticAnalyzer.processCheckConstraints(catName, qualifiedTabName[0], qualifiedTabName[1],
child, null, checkConstraints, child,
this.ctx.getTokenRewriteStream());
break;
default:
throw new SemanticException(ErrorMsg.NOT_RECOGNIZED_CONSTRAINT.getMsg(
child.getToken().getText()));
}
AlterTableDesc alterTblDesc = new AlterTableDesc(tableName, primaryKeys, foreignKeys,
uniqueConstraints, null, null, checkConstraints, null);
rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
alterTblDesc)));
}
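/**
* Analyzes ALTER TABLE ... UPDATE COLUMNS statements, which refresh the column metadata
* of a table whose schema is derived from its SerDe, optionally cascading to partitions.
*/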
private void analyzeAlterTableUpdateColumns(ASTNode ast, String tableName,
HashMap<String, String> partSpec) throws SemanticException {
boolean isCascade = false;
if (null != ast.getFirstChildWithType(HiveParser.TOK_CASCADE)) {
isCascade = true;
}
AlterTableDesc alterTblDesc = new AlterTableDesc(AlterTableTypes.UPDATECOLUMNS);
alterTblDesc.setOldName(tableName);
alterTblDesc.setIsCascade(isCascade);
alterTblDesc.setPartSpec(partSpec);
rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
alterTblDesc), conf));
}
static HashMap<String, String> getProps(ASTNode prop) {
// Must be deterministic order map for consistent q-test output across Java versions
HashMap<String, String> mapProp = new LinkedHashMap<String, String>();
readProps(prop, mapProp);
return mapProp;
}
/**
* Utility class to resolve QualifiedName
*/
static class QualifiedNameUtil {
// delimiter to check DOT delimited qualified names
static final String delimiter = "\\.";
/**
* Get the fully qualified name in the ast. e.g. the ast of the form ^(DOT
* ^(DOT a b) c) will generate a name of the form a.b.c
*
* @param ast
* The AST from which the qualified name has to be extracted
* @return String
*/
static public String getFullyQualifiedName(ASTNode ast) {
if (ast.getChildCount() == 0) {
return ast.getText();
} else if (ast.getChildCount() == 2) {
return getFullyQualifiedName((ASTNode) ast.getChild(0)) + "."
+ getFullyQualifiedName((ASTNode) ast.getChild(1));
} else if (ast.getChildCount() == 3) {
return getFullyQualifiedName((ASTNode) ast.getChild(0)) + "."
+ getFullyQualifiedName((ASTNode) ast.getChild(1)) + "."
+ getFullyQualifiedName((ASTNode) ast.getChild(2));
} else {
return null;
}
}
// get the column path
// return the column name if one exists; the column may be DOT separated,
// for example: lintString.$elem$.myint
// return the table name if no column has been specified.
static public String getColPath(
Hive db,
ASTNode node,
String dbName,
String tableName,
Map<String, String> partSpec) throws SemanticException {
// if this ast has only one child, then no column name specified.
if (node.getChildCount() == 1) {
return tableName;
}
ASTNode columnNode = null;
// Second child node could be partitionspec or column
if (node.getChildCount() > 1) {
if (partSpec == null) {
columnNode = (ASTNode) node.getChild(1);
} else {
columnNode = (ASTNode) node.getChild(2);
}
}
if (columnNode != null) {
if (dbName == null) {
return tableName + "." + QualifiedNameUtil.getFullyQualifiedName(columnNode);
} else {
return tableName.substring(dbName.length() + 1, tableName.length()) + "." +
QualifiedNameUtil.getFullyQualifiedName(columnNode);
}
} else {
return tableName;
}
}
// get partition metadata
static public Map<String, String> getPartitionSpec(Hive db, ASTNode ast, String tableName)
throws SemanticException {
ASTNode partNode = null;
// if this ast has only one child, then no partition spec specified.
if (ast.getChildCount() == 1) {
return null;
}
// if ast has two children
// the 2nd child could be partition spec or columnName
// if the ast has 3 children, the second *has to* be partition spec
if (ast.getChildCount() > 2 && (((ASTNode) ast.getChild(1)).getType() != HiveParser.TOK_PARTSPEC)) {
throw new SemanticException(((ASTNode) ast.getChild(1)).getType() + " is not a partition specification");
}
if (((ASTNode) ast.getChild(1)).getType() == HiveParser.TOK_PARTSPEC) {
partNode = (ASTNode) ast.getChild(1);
}
if (partNode != null) {
Table tab = null;
try {
tab = db.getTable(tableName);
}
catch (InvalidTableException e) {
throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tableName), e);
}
catch (HiveException e) {
throw new SemanticException(e.getMessage(), e);
}
HashMap<String, String> partSpec = null;
try {
partSpec = getValidatedPartSpec(tab, partNode, db.getConf(), false);
} catch (SemanticException e) {
// an exception while resolving the partition spec may mean this is actually
// "DESCRIBE table key", so return null and continue processing it as such
return null;
}
if (partSpec != null) {
Partition part = null;
try {
part = db.getPartition(tab, partSpec, false);
} catch (HiveException e) {
// an exception while looking up the partition may mean this is actually
// "DESCRIBE table key", so return null and continue processing it as such
return null;
}
// if the partition is not found, this is a DESCRIBE of a table partition
// with an invalid partition spec, so raise an exception
if (part == null) {
throw new SemanticException(ErrorMsg.INVALID_PARTITION.getMsg(partSpec.toString()));
}
// it is DESCRIBE table partition
// return partition metadata
return partSpec;
}
}
return null;
}
}
private void validateDatabase(String databaseName) throws SemanticException {
try {
if (!db.databaseExists(databaseName)) {
throw new SemanticException(ErrorMsg.DATABASE_NOT_EXISTS.getMsg(databaseName));
}
} catch (HiveException e) {
throw new SemanticException(ErrorMsg.DATABASE_NOT_EXISTS.getMsg(databaseName), e);
}
}
private void validateTable(String tableName, Map<String, String> partSpec)
throws SemanticException {
Table tab = getTable(tableName);
if (partSpec != null) {
getPartition(tab, partSpec, true);
}
}
/**
* A query like this will generate a tree as follows
* "describe formatted default.maptable partition (b=100) id;"
* TOK_TABTYPE
* TOK_TABNAME --> root for tablename, 2 child nodes mean DB specified
* default
* maptable
* TOK_PARTSPEC --> root node for partition spec. else columnName
* TOK_PARTVAL
* b
* 100
* id --> root node for columnName
* formatted
*/
private void analyzeDescribeTable(ASTNode ast) throws SemanticException {
ASTNode tableTypeExpr = (ASTNode) ast.getChild(0);
String dbName = null;
String tableName = null;
String colPath = null;
Map<String, String> partSpec = null;
ASTNode tableNode = null;
// process the first node to extract tablename
// tablename is either TABLENAME or DBNAME.TABLENAME if db is given
if (((ASTNode) tableTypeExpr.getChild(0)).getType() == HiveParser.TOK_TABNAME) {
tableNode = (ASTNode) tableTypeExpr.getChild(0);
if (tableNode.getChildCount() == 1) {
tableName = ((ASTNode) tableNode.getChild(0)).getText();
} else {
dbName = ((ASTNode) tableNode.getChild(0)).getText();
tableName = dbName + "." + ((ASTNode) tableNode.getChild(1)).getText();
}
} else {
throw new SemanticException(((ASTNode) tableTypeExpr.getChild(0)).getText() + " is not an expected token type");
}
// process the second child node, if it exists, to get the partition spec(s)
partSpec = QualifiedNameUtil.getPartitionSpec(db, tableTypeExpr, tableName);
// process the third child node, if it exists, to get the column path
colPath = QualifiedNameUtil.getColPath(db, tableTypeExpr, dbName, tableName, partSpec);
// if the database is not the one currently in use, validate it
if (dbName != null) {
validateDatabase(dbName);
}
if (partSpec != null) {
validateTable(tableName, partSpec);
}
DescTableDesc descTblDesc = new DescTableDesc(
ctx.getResFile(), tableName, partSpec, colPath);
boolean showColStats = false;
if (ast.getChildCount() == 2) {
int descOptions = ast.getChild(1).getType();
descTblDesc.setFormatted(descOptions == HiveParser.KW_FORMATTED);
descTblDesc.setExt(descOptions == HiveParser.KW_EXTENDED);
// in case of "DESCRIBE FORMATTED tablename column_name" statement, colPath
// will contain tablename.column_name. If column_name is not specified
// colPath will be equal to tableName. This is how we can differentiate
// if we are describing a table or column
if (!colPath.equalsIgnoreCase(tableName) && descTblDesc.isFormatted()) {
showColStats = true;
}
}
inputs.add(new ReadEntity(getTable(tableName)));
Task ddlTask = TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
descTblDesc));
rootTasks.add(ddlTask);
String schema = DescTableDesc.getSchema(showColStats);
setFetchTask(createFetchTask(schema));
LOG.info("analyzeDescribeTable done");
}
/**
* Describe database.
*
* @param ast
* @throws SemanticException
*/
private void analyzeDescDatabase(ASTNode ast) throws SemanticException {
boolean isExtended;
String dbName;
if (ast.getChildCount() == 1) {
dbName = stripQuotes(ast.getChild(0).getText());
isExtended = false;
} else if (ast.getChildCount() == 2) {
dbName = stripQuotes(ast.getChild(0).getText());
isExtended = true;
} else {
throw new SemanticException("Unexpected Tokens at DESCRIBE DATABASE");
}
DescDatabaseDesc descDbDesc = new DescDatabaseDesc(ctx.getResFile(),
dbName, isExtended);
inputs.add(new ReadEntity(getDatabase(dbName)));
rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), descDbDesc)));
setFetchTask(createFetchTask(descDbDesc.getSchema()));
}
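/**
* Converts a TOK_PARTSPEC node into an ordered map from partition column name (lower-cased)
* to value, e.g. the spec {@code PARTITION (ds='2020-01-01', hr)} yields
* {ds=2020-01-01, hr=null}.
*/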
public static HashMap<String, String> getPartSpec(ASTNode partspec)
throws SemanticException {
if (partspec == null) {
return null;
}
HashMap<String, String> partSpec = new LinkedHashMap<String, String>();
for (int i = 0; i < partspec.getChildCount(); ++i) {
ASTNode partspec_val = (ASTNode) partspec.getChild(i);
String key = partspec_val.getChild(0).getText();
String val = null;
if (partspec_val.getChildCount() > 1) {
val = stripQuotes(partspec_val.getChild(1).getText());
}
partSpec.put(key.toLowerCase(), val);
}
return partSpec;
}
public static HashMap<String, String> getValidatedPartSpec(Table table, ASTNode astNode,
HiveConf conf, boolean shouldBeFull) throws SemanticException {
HashMap<String, String> partSpec = getPartSpec(astNode);
if (partSpec != null && !partSpec.isEmpty()) {
validatePartSpec(table, partSpec, astNode, conf, shouldBeFull);
}
return partSpec;
}
private void analyzeShowPartitions(ASTNode ast) throws SemanticException {
ShowPartitionsDesc showPartsDesc;
String tableName = getUnescapedName((ASTNode) ast.getChild(0));
List