/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hive.ql.optimizer;
import java.util.ArrayList;
import java.util.Collection;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.Stack;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hive.common.StatsSetupConst;
import org.apache.hadoop.hive.common.type.HiveDecimal;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
import org.apache.hadoop.hive.metastore.api.DoubleColumnStatsData;
import org.apache.hadoop.hive.metastore.api.LongColumnStatsData;
import org.apache.hadoop.hive.ql.exec.ColumnInfo;
import org.apache.hadoop.hive.ql.exec.Description;
import org.apache.hadoop.hive.ql.exec.FetchTask;
import org.apache.hadoop.hive.ql.exec.FileSinkOperator;
import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
import org.apache.hadoop.hive.ql.exec.GroupByOperator;
import org.apache.hadoop.hive.ql.exec.ReduceSinkOperator;
import org.apache.hadoop.hive.ql.exec.SelectOperator;
import org.apache.hadoop.hive.ql.exec.TableScanOperator;
import org.apache.hadoop.hive.ql.exec.TaskFactory;
import org.apache.hadoop.hive.ql.lib.DefaultGraphWalker;
import org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher;
import org.apache.hadoop.hive.ql.lib.Dispatcher;
import org.apache.hadoop.hive.ql.lib.GraphWalker;
import org.apache.hadoop.hive.ql.lib.Node;
import org.apache.hadoop.hive.ql.lib.NodeProcessor;
import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
import org.apache.hadoop.hive.ql.lib.Rule;
import org.apache.hadoop.hive.ql.lib.RuleRegExp;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.hadoop.hive.ql.parse.ParseContext;
import org.apache.hadoop.hive.ql.parse.SemanticException;
import org.apache.hadoop.hive.ql.plan.AggregationDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
import org.apache.hadoop.hive.ql.plan.FetchWork;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFCount;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFMax;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFMin;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFResolver;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFSum;
import org.apache.hadoop.hive.serde.serdeConstants;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector.PrimitiveCategory;
import org.apache.hadoop.hive.serde2.objectinspector.StandardStructObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
import org.apache.hive.common.util.AnnotationUtils;
import org.apache.thrift.TException;
import com.google.common.collect.Lists;
/** There is a set of queries that can be answered entirely from the statistics stored in the
* metastore. Examples of such queries are count(*), count(a), max(a), min(b), etc. Hive already
* collects these basic statistics for query planning purposes, and the same statistics can be
* used to answer the queries themselves.
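* For example, a query such as
*   SELECT count(1), max(a), min(b) FROM t
* can be answered from the table's row count and column statistics alone, without
* scanning any data.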
*
* The optimizer looks at the query plan to determine whether it can answer the query using
* statistics, and then changes the plan so that the query is answered entirely from the
* statistics stored in the metastore.
*/
public class StatsOptimizer implements Transform {
// TODO: [HIVE-6289] while getting stats from metastore, we currently only get one col at
// a time; this could be improved - get all necessary columns in advance, then use local.
// TODO: [HIVE-6292] aggregations could be done directly in metastore. Hive over MySQL!
private static final Log Log = LogFactory.getLog(StatsOptimizer.class);
@Override
public ParseContext transform(ParseContext pctx) throws SemanticException {
if (pctx.getFetchTask() != null || !pctx.getQB().getIsQuery() ||
pctx.getQB().isAnalyzeRewrite() || pctx.getQB().isCTAS() ||
pctx.getLoadFileWork().size() > 1 || !pctx.getLoadTableWork().isEmpty()) {
return pctx;
}
String TS = TableScanOperator.getOperatorName() + "%";
String GBY = GroupByOperator.getOperatorName() + "%";
String RS = ReduceSinkOperator.getOperatorName() + "%";
String SEL = SelectOperator.getOperatorName() + "%";
String FS = FileSinkOperator.getOperatorName() + "%";
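// The single rule below matches the exact operator pipeline this optimization targets,
// e.g. the plan produced for a simple aggregation query such as "select count(1) from t":
// TableScan - Select - GroupBy - ReduceSink - GroupBy - Select - FileSink.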
Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>();
opRules.put(new RuleRegExp("R1", TS + SEL + GBY + RS + GBY + SEL + FS),
new MetaDataProcessor(pctx));
Dispatcher disp = new DefaultRuleDispatcher(null, opRules, null);
GraphWalker ogw = new DefaultGraphWalker(disp);
ArrayList<Node> topNodes = new ArrayList<Node>();
topNodes.addAll(pctx.getTopOps().values());
ogw.startWalking(topNodes, null);
return pctx;
}
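// Processes a plan that matched the TS-SEL-GBY-RS-GBY-SEL-FS rule and, when the query
// can be answered from metastore statistics alone, rewrites it to return the
// pre-computed result instead of scanning the table.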
private static class MetaDataProcessor implements NodeProcessor {
private final ParseContext pctx;
public MetaDataProcessor(ParseContext pctx) {
this.pctx = pctx;
}
enum StatType{
Integeral,
Double,
String,
Boolean,
Binary,
Unsupported
}
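// Maps a Hive column type name to the kind of column statistics the metastore
// keeps for it (long, double, string, boolean or binary stats).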
private StatType getType(String origType) {
if (serdeConstants.IntegralTypes.contains(origType)) {
return StatType.Integeral;
} else if (origType.equals(serdeConstants.DOUBLE_TYPE_NAME) ||
origType.equals(serdeConstants.FLOAT_TYPE_NAME)) {
return StatType.Double;
} else if (origType.equals(serdeConstants.BINARY_TYPE_NAME)) {
return StatType.Binary;
} else if (origType.equals(serdeConstants.BOOLEAN_TYPE_NAME)) {
return StatType.Boolean;
} else if (origType.equals(serdeConstants.STRING_TYPE_NAME)) {
return StatType.String;
}
return StatType.Unsupported;
}
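// Reads the null count for a column from its metastore ColumnStatisticsData,
// dispatching on the statistics category; returns null for unsupported types.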
private Long getNullcountFor(StatType type, ColumnStatisticsData statData) {
switch(type) {
case Integeral :
return statData.getLongStats().getNumNulls();
case Double:
return statData.getDoubleStats().getNumNulls();
case String:
return statData.getStringStats().getNumNulls();
case Boolean:
return statData.getBooleanStats().getNumNulls();
case Binary:
return statData.getBinaryStats().getNumNulls();
default:
return null;
}
}
@Override
public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
Object... nodeOutputs) throws SemanticException {
// 1. Do a few checks to determine the eligibility of the optimization.
// 2. Look at the ExprNodeGenericFuncDesc in the select list to see if it is min, max, count, etc.
// If it is:
// 3. Connect to the metastore and get the stats.
// 4. Compose the result rows and add them to a FetchWork.
// 5. Delete GBY - RS - GBY - SEL from the pipeline.
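// For example, "select count(1) from t" would be answered by composing a single row
// from the row count recorded in the metastore and returning it via a FetchTask,
// rather than running the full pipeline.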
try {
TableScanOperator tsOp = (TableScanOperator) stack.get(0);
if(tsOp.getParentOperators() != null && tsOp.getParentOperators().size() > 0) {
// looks like a subq plan.
return null;
}
SelectOperator selOp = (SelectOperator)tsOp.getChildren().get(0);
for(ExprNodeDesc desc : selOp.getConf().getColList()) {
if (!((desc instanceof ExprNodeColumnDesc) || (desc instanceof ExprNodeConstantDesc))) {
// Probably an expression; can't handle that.
return null;
}
}
Map<String, ExprNodeDesc> exprMap = selOp.getColumnExprMap();
// Since we have done an exact match on TS-SEL-GBY-RS-GBY-SEL-FS,
// we need not do any instanceof checks for the casts below.
GroupByOperator gbyOp = (GroupByOperator)selOp.getChildren().get(0);
ReduceSinkOperator rsOp = (ReduceSinkOperator)gbyOp.getChildren().get(0);
if (rsOp.getConf().getDistinctColumnIndices().size() > 0) {
// we can't handle distinct
return null;
}
selOp = (SelectOperator)rsOp.getChildOperators().get(0).getChildOperators().get(0);
List<AggregationDesc> aggrs = gbyOp.getConf().getAggregators();
if (selOp.getConf().getColList().size() != aggrs.size()) {
// all select columns must be aggregations
return null;
}
for(ExprNodeDesc desc : selOp.getConf().getColList()) {
if (!(desc instanceof ExprNodeColumnDesc)) {
// Probably an expression; can't handle that.
return null;
}
}
FileSinkOperator fsOp = (FileSinkOperator)(selOp.getChildren().get(0));
if (fsOp.getChildOperators() != null && fsOp.getChildOperators().size() > 0) {
// looks like a subq plan.
return null;
}
Table tbl = pctx.getTopToTable().get(tsOp);
List