/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to you under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.hazelcast.org.apache.calcite.plan.volcano;
import com.hazelcast.org.apache.calcite.config.CalciteConnectionConfig;
import com.hazelcast.org.apache.calcite.config.CalciteSystemProperty;
import com.hazelcast.org.apache.calcite.plan.AbstractRelOptPlanner;
import com.hazelcast.org.apache.calcite.plan.Context;
import com.hazelcast.org.apache.calcite.plan.Convention;
import com.hazelcast.org.apache.calcite.plan.ConventionTraitDef;
import com.hazelcast.org.apache.calcite.plan.RelOptCost;
import com.hazelcast.org.apache.calcite.plan.RelOptCostFactory;
import com.hazelcast.org.apache.calcite.plan.RelOptLattice;
import com.hazelcast.org.apache.calcite.plan.RelOptMaterialization;
import com.hazelcast.org.apache.calcite.plan.RelOptMaterializations;
import com.hazelcast.org.apache.calcite.plan.RelOptPlanner;
import com.hazelcast.org.apache.calcite.plan.RelOptRule;
import com.hazelcast.org.apache.calcite.plan.RelOptRuleCall;
import com.hazelcast.org.apache.calcite.plan.RelOptRuleOperand;
import com.hazelcast.org.apache.calcite.plan.RelOptSchema;
import com.hazelcast.org.apache.calcite.plan.RelOptTable;
import com.hazelcast.org.apache.calcite.plan.RelOptUtil;
import com.hazelcast.org.apache.calcite.plan.RelTrait;
import com.hazelcast.org.apache.calcite.plan.RelTraitDef;
import com.hazelcast.org.apache.calcite.plan.RelTraitSet;
import com.hazelcast.org.apache.calcite.rel.PhysicalNode;
import com.hazelcast.org.apache.calcite.rel.RelNode;
import com.hazelcast.org.apache.calcite.rel.convert.Converter;
import com.hazelcast.org.apache.calcite.rel.convert.ConverterRule;
import com.hazelcast.org.apache.calcite.rel.externalize.RelWriterImpl;
import com.hazelcast.org.apache.calcite.rel.metadata.CyclicMetadataException;
import com.hazelcast.org.apache.calcite.rel.metadata.JaninoRelMetadataProvider;
import com.hazelcast.org.apache.calcite.rel.metadata.RelMdUtil;
import com.hazelcast.org.apache.calcite.rel.metadata.RelMetadataProvider;
import com.hazelcast.org.apache.calcite.rel.metadata.RelMetadataQuery;
import com.hazelcast.org.apache.calcite.rel.rules.TransformationRule;
import com.hazelcast.org.apache.calcite.rel.type.RelDataType;
import com.hazelcast.org.apache.calcite.runtime.Hook;
import com.hazelcast.org.apache.calcite.sql.SqlExplainLevel;
import com.hazelcast.org.apache.calcite.util.Litmus;
import com.hazelcast.org.apache.calcite.util.Pair;
import com.hazelcast.org.apache.calcite.util.Util;
import com.hazelcast.com.google.common.collect.ImmutableList;
import com.hazelcast.com.google.common.collect.LinkedListMultimap;
import com.hazelcast.com.google.common.collect.Multimap;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.HashMap;
import java.util.HashSet;
import java.util.IdentityHashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
* VolcanoPlanner optimizes queries by transforming expressions selectively
* according to a dynamic programming algorithm.
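*
* <p>A minimal usage sketch; the input plan, target convention and the
* engine-specific rules below are placeholders that a real integration
* supplies:
*
* <pre>{@code
* VolcanoPlanner planner = new VolcanoPlanner();
* planner.addRelTraitDef(ConventionTraitDef.INSTANCE);
* planner.registerAbstractRelationalRules();
* // plus the engine's own converter/implementation rules via addRule(...)
*
* RelNode logicalPlan = ...;   // placeholder: e.g. from the SQL-to-rel converter
* RelTraitSet desired =
*     logicalPlan.getTraitSet().replace(targetConvention);  // placeholder convention
* planner.setRoot(planner.changeTraits(logicalPlan, desired));
* RelNode best = planner.findBestExp();
* }</pre>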
*/
public class VolcanoPlanner extends AbstractRelOptPlanner {
//~ Instance fields --------------------------------------------------------
protected RelSubset root;
/**
* Operands that apply to a given class of {@link RelNode}.
*
* Any operand can be an 'entry point' to a rule call, when a RelNode is
* registered which matches the operand. This map allows us to narrow down
* operands based on the class of the RelNode.
*/
private final Multimap<Class<? extends RelNode>, RelOptRuleOperand>
classOperands = LinkedListMultimap.create();
/**
* List of all sets. Used only for debugging.
*/
final List<RelSet> allSets = new ArrayList<>();
/**
* Canonical map from {@link String digest} to the unique
* {@link RelNode relational expression} with that digest.
*
* Row type is part of the key for the rare occasion that similar
* expressions have different types, e.g. variants of
* {@code Project(child=rel#1, a=null)} where a is a null INTEGER or a
* null VARCHAR(10).
* Row type is represented as field types only, so {@code RelNode}s that differ
* only in field names are treated as equal.
* For instance, {@code Project(input=rel#1,empid=$0)} and {@code Project(input=rel#1,deptno=$0)}
* are equal.
*/
private final Map<Pair<String, List<RelDataType>>, RelNode> mapDigestToRel =
new HashMap<>();
/**
* Map each registered expression ({@link RelNode}) to its equivalence set
* ({@link RelSubset}).
*
* We use an {@link IdentityHashMap} to simplify the process of merging
* {@link RelSet} objects. Most {@link RelNode} objects are identified by
* their digest, which involves the set that their child relational
* expressions belong to. If those children belong to the same set, we have
* to be careful, otherwise it gets incestuous.
*/
private final IdentityHashMap<RelNode, RelSubset> mapRel2Subset =
new IdentityHashMap<>();
/**
* The nodes to be pruned.
*
* If a RelNode is pruned, all {@link RelOptRuleCall}s using it
* are ignored, and future RelOptRuleCalls are not queued up.
*/
final Set<RelNode> prunedNodes = new HashSet<>();
/**
* List of all schemas which have been registered.
*/
private final Set<RelOptSchema> registeredSchemas = new HashSet<>();
/**
* Holds rule calls waiting to be fired.
*/
final RuleQueue ruleQueue = new RuleQueue(this);
/**
* Holds the currently registered RelTraitDefs.
*/
private final List<RelTraitDef> traitDefs = new ArrayList<>();
private int nextSetId = 0;
private RelNode originalRoot;
/**
* Whether the planner can accept new rules.
*/
private boolean locked;
/**
* Whether rels with Convention.NONE have infinite cost.
*/
private boolean noneConventionHasInfiniteCost = true;
private final List<RelOptMaterialization> materializations =
new ArrayList<>();
/**
* Map of lattices by the qualified name of their star table.
*/
private final Map<List<String>, RelOptLattice> latticeByName =
new LinkedHashMap<>();
final Map<RelNode, Provenance> provenanceMap;
final Deque<VolcanoRuleCall> ruleCallStack = new ArrayDeque<>();
/** Zero cost, according to {@link #costFactory}. Not necessarily a
* {@link com.hazelcast.org.apache.calcite.plan.volcano.VolcanoCost}. */
private final RelOptCost zeroCost;
/**
* Optimization tasks, including trait propagation and enforcement.
*/
final Deque<OptimizeTask> tasks = new ArrayDeque<>();
/**
* The id generator for optimization tasks.
*/
int nextTaskId = 0;
/**
* Whether to enable top-down optimization or not.
*/
boolean topDownOpt = CalciteSystemProperty.TOPDOWN_OPT.value();
//~ Constructors -----------------------------------------------------------
/**
* Creates an uninitialized <code>VolcanoPlanner</code>. To fully initialize
* it, the caller must register the desired set of relations, rules, and
* calling conventions.
*/
public VolcanoPlanner() {
this(null, null);
}
/**
* Creates an uninitialized <code>VolcanoPlanner</code>. To fully initialize
* it, the caller must register the desired set of relations, rules, and
* calling conventions.
*/
public VolcanoPlanner(Context externalContext) {
this(null, externalContext);
}
/**
* Creates a {@code VolcanoPlanner} with a given cost factory.
*/
public VolcanoPlanner(RelOptCostFactory costFactory,
Context externalContext) {
super(costFactory == null ? VolcanoCost.FACTORY : costFactory,
externalContext);
this.zeroCost = this.costFactory.makeZeroCost();
// If LOGGER is debug enabled, enable provenance information to be captured
this.provenanceMap = LOGGER.isDebugEnabled() ? new HashMap<>()
: Util.blackholeMap();
}
//~ Methods ----------------------------------------------------------------
protected VolcanoPlannerPhaseRuleMappingInitializer
getPhaseRuleMappingInitializer() {
return phaseRuleMap -> {
// Disable all phases except OPTIMIZE by adding one useless rule name.
phaseRuleMap.get(VolcanoPlannerPhase.PRE_PROCESS_MDR).add("xxx");
phaseRuleMap.get(VolcanoPlannerPhase.PRE_PROCESS).add("xxx");
phaseRuleMap.get(VolcanoPlannerPhase.CLEANUP).add("xxx");
};
}
/**
* Enable or disable top-down optimization.
*
* Note: Enabling top-down optimization will automatically disable
* the use of AbstractConverter and related rules.
*/
public void setTopDownOpt(boolean value) {
topDownOpt = value;
}
// implement RelOptPlanner
public boolean isRegistered(RelNode rel) {
return mapRel2Subset.get(rel) != null;
}
public void setRoot(RelNode rel) {
// We've registered all the rules, and therefore RelNode classes,
// we're interested in, and have not yet started calling metadata providers.
// So now is a good time to tell the metadata layer what to expect.
registerMetadataRels();
this.root = registerImpl(rel, null);
if (this.originalRoot == null) {
this.originalRoot = rel;
}
ensureRootConverters();
}
public RelNode getRoot() {
return root;
}
@Override public List<RelOptMaterialization> getMaterializations() {
return ImmutableList.copyOf(materializations);
}
@Override public void addMaterialization(
RelOptMaterialization materialization) {
materializations.add(materialization);
}
@Override public void addLattice(RelOptLattice lattice) {
latticeByName.put(lattice.starRelOptTable.getQualifiedName(), lattice);
}
@Override public RelOptLattice getLattice(RelOptTable table) {
return latticeByName.get(table.getQualifiedName());
}
protected void registerMaterializations() {
// Avoid using materializations while populating materializations!
final CalciteConnectionConfig config =
context.unwrap(CalciteConnectionConfig.class);
if (config == null || !config.materializationsEnabled()) {
return;
}
// Register rels using materialized views.
final List<Pair<RelNode, List<RelOptMaterialization>>> materializationUses =
RelOptMaterializations.useMaterializedViews(originalRoot, materializations);
for (Pair<RelNode, List<RelOptMaterialization>> use : materializationUses) {
RelNode rel = use.left;
Hook.SUB.run(rel);
registerImpl(rel, root.set);
}
// Register table rels of materialized views that cannot find a substitution
// in root rel transformation but can potentially be useful.
final Set<RelOptMaterialization> applicableMaterializations =
new HashSet<>(
RelOptMaterializations.getApplicableMaterializations(
originalRoot, materializations));
for (Pair<RelNode, List<RelOptMaterialization>> use : materializationUses) {
applicableMaterializations.removeAll(use.right);
}
for (RelOptMaterialization materialization : applicableMaterializations) {
RelSubset subset = registerImpl(materialization.queryRel, null);
RelNode tableRel2 =
RelOptUtil.createCastRel(
materialization.tableRel,
materialization.queryRel.getRowType(),
true);
registerImpl(tableRel2, subset.set);
}
// Register rels using lattices.
final List<Pair<RelNode, RelOptLattice>> latticeUses =
RelOptMaterializations.useLattices(
originalRoot, ImmutableList.copyOf(latticeByName.values()));
if (!latticeUses.isEmpty()) {
RelNode rel = latticeUses.get(0).left;
Hook.SUB.run(rel);
registerImpl(rel, root.set);
}
}
/**
* Finds an expression's equivalence set. If the expression is not
* registered, returns null.
*
* @param rel Relational expression
* @return Equivalence set that expression belongs to, or null if it is not
* registered
*/
public RelSet getSet(RelNode rel) {
assert rel != null : "pre: rel != null";
final RelSubset subset = getSubset(rel);
if (subset != null) {
assert subset.set != null;
return subset.set;
}
return null;
}
@Override public boolean addRelTraitDef(RelTraitDef relTraitDef) {
return !traitDefs.contains(relTraitDef) && traitDefs.add(relTraitDef);
}
@Override public void clearRelTraitDefs() {
traitDefs.clear();
}
@Override public List<RelTraitDef> getRelTraitDefs() {
return traitDefs;
}
@Override public RelTraitSet emptyTraitSet() {
RelTraitSet traitSet = super.emptyTraitSet();
for (RelTraitDef traitDef : traitDefs) {
if (traitDef.multiple()) {
// TODO: restructure RelTraitSet to allow a list of entries
// for any given trait
}
traitSet = traitSet.plus(traitDef.getDefault());
}
return traitSet;
}
@Override public void clear() {
super.clear();
for (RelOptRule rule : getRules()) {
removeRule(rule);
}
this.classOperands.clear();
this.allSets.clear();
this.mapDigestToRel.clear();
this.mapRel2Subset.clear();
this.prunedNodes.clear();
this.ruleQueue.clear();
this.materializations.clear();
this.latticeByName.clear();
this.provenanceMap.clear();
}
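/**
* Registers a rule. Each of the rule's operands is recorded against the
* {@link RelNode} subclasses it can match (transformation rules are not
* matched against physical nodes), and converter rules are additionally
* registered with the {@link RelTraitDef} of their input trait. Returns
* false if the planner is locked or the rule could not be added (for
* example, because it has already been registered).
*/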
public boolean addRule(RelOptRule rule) {
if (locked) {
return false;
}
if (!super.addRule(rule)) {
return false;
}
// Each of this rule's operands is an 'entry point' for a rule call.
// Register each operand against all concrete sub-classes that could match
// it.
for (RelOptRuleOperand operand : rule.getOperands()) {
for (Class<? extends RelNode> subClass
: subClasses(operand.getMatchedClass())) {
if (PhysicalNode.class.isAssignableFrom(subClass)
&& rule instanceof TransformationRule) {
continue;
}
classOperands.put(subClass, operand);
}
}
// If this is a converter rule, check that it operates on one of the
// kinds of trait we are interested in, and if so, register the rule
// with the trait.
if (rule instanceof ConverterRule) {
ConverterRule converterRule = (ConverterRule) rule;
final RelTrait ruleTrait = converterRule.getInTrait();
final RelTraitDef ruleTraitDef = ruleTrait.getTraitDef();
if (traitDefs.contains(ruleTraitDef)) {
ruleTraitDef.registerConverterRule(this, converterRule);
}
}
return true;
}
public boolean removeRule(RelOptRule rule) {
// Remove description.
if (!super.removeRule(rule)) {
// Rule was not present.
return false;
}
// Remove operands.
classOperands.values().removeIf(entry -> entry.getRule().equals(rule));
// Remove trait mappings. (In particular, entries from conversion
// graph.)
if (rule instanceof ConverterRule) {
ConverterRule converterRule = (ConverterRule) rule;
final RelTrait ruleTrait = converterRule.getInTrait();
final RelTraitDef ruleTraitDef = ruleTrait.getTraitDef();
if (traitDefs.contains(ruleTraitDef)) {
ruleTraitDef.deregisterConverterRule(this, converterRule);
}
}
return true;
}
@Override protected void onNewClass(RelNode node) {
super.onNewClass(node);
final boolean isPhysical = node instanceof PhysicalNode;
// Create mappings so that instances of this class will match existing
// operands.
final Class<? extends RelNode> clazz = node.getClass();
for (RelOptRule rule : mapDescToRule.values()) {
if (isPhysical && rule instanceof TransformationRule) {
continue;
}
for (RelOptRuleOperand operand : rule.getOperands()) {
if (operand.getMatchedClass().isAssignableFrom(clazz)) {
classOperands.put(clazz, operand);
}
}
}
}
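/**
* Registers {@code rel} if necessary and returns the {@link RelSubset} of its
* equivalence set that carries the requested traits, creating that subset
* (marked as required) when it does not exist yet.
*/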
public RelNode changeTraits(final RelNode rel, RelTraitSet toTraits) {
assert !rel.getTraitSet().equals(toTraits);
assert toTraits.allSimple();
RelSubset rel2 = ensureRegistered(rel, null);
if (rel2.getTraitSet().equals(toTraits)) {
return rel2;
}
return rel2.set.getOrCreateSubset(
rel.getCluster(), toTraits, true);
}
public RelOptPlanner chooseDelegate() {
return this;
}
/**
* Finds the most efficient expression to implement the query given via
* {@link com.hazelcast.org.apache.calcite.plan.RelOptPlanner#setRoot(com.hazelcast.org.apache.calcite.rel.RelNode)}.
*
* The algorithm executes repeatedly in a series of phases. In each phase
* the exact rules that may be fired varies. The mapping of phases to rule
* sets is maintained in the {@link #ruleQueue}.
*
* <p>In each phase, the planner then iterates over the rule matches presented
* by the rule queue until the rule queue becomes empty.
*
* @return the most efficient RelNode tree found for implementing the given
* query
*/
public RelNode findBestExp() {
ensureRootConverters();
registerMaterializations();
PLANNING:
for (VolcanoPlannerPhase phase : VolcanoPlannerPhase.values()) {
while (true) {
LOGGER.debug("PLANNER = {}; PHASE = {}; COST = {}",
this, phase.toString(), root.bestCost);
VolcanoRuleMatch match = ruleQueue.popMatch(phase);
if (match == null) {
break;
}
assert match.getRule().matches(match);
try {
match.onMatch();
} catch (VolcanoTimeoutException e) {
root = canonize(root);
ruleQueue.phaseCompleted(phase);
break PLANNING;
}
// The root may have been merged with another
// subset. Find the new root subset.
root = canonize(root);
}
ruleQueue.phaseCompleted(phase);
}
if (topDownOpt) {
tasks.push(OptimizeTask.create(root));
while (!tasks.isEmpty()) {
OptimizeTask task = tasks.peek();
if (task.hasSubTask()) {
tasks.push(task.nextSubTask());
continue;
}
task = tasks.pop();
task.execute();
}
}
if (LOGGER.isTraceEnabled()) {
StringWriter sw = new StringWriter();
final PrintWriter pw = new PrintWriter(sw);
dump(pw);
pw.flush();
LOGGER.trace(sw.toString());
}
dumpRuleAttemptsInfo();
RelNode cheapest = root.buildCheapestPlan(this);
if (LOGGER.isDebugEnabled()) {
LOGGER.debug(
"Cheapest plan:\n{}", RelOptUtil.toString(cheapest, SqlExplainLevel.ALL_ATTRIBUTES));
if (!provenanceMap.isEmpty()) {
LOGGER.debug("Provenance:\n{}", Dumpers.provenance(provenanceMap, cheapest));
}
}
return cheapest;
}
@Override public void checkCancel() {
if (cancelFlag.get()) {
throw new VolcanoTimeoutException();
}
}
/** Informs {@link JaninoRelMetadataProvider} about the different kinds of
* {@link RelNode} that we will be dealing with. It will reduce the number
* of times that we need to re-generate the provider. */
private void registerMetadataRels() {
JaninoRelMetadataProvider.DEFAULT.register(classOperands.keySet());
}
/** Ensures that the subset that is the root relational expression contains
* converters to all other subsets in its equivalence set.
*
* <p>Thus the planner tries to find cheap implementations of those other
* subsets, which can then be converted to the root. This is the only place
* in the plan where explicit converters are required; elsewhere, a consumer
* will be asking for the result in a particular convention, but the root has
* no consumers. */
void ensureRootConverters() {
final Set<RelSubset> subsets = new HashSet<>();
for (RelNode rel : root.getRels()) {
if (rel instanceof AbstractConverter) {
subsets.add((RelSubset) ((AbstractConverter) rel).getInput());
}
}
for (RelSubset subset : root.set.subsets) {
final ImmutableList<RelTrait> difference =
root.getTraitSet().difference(subset.getTraitSet());
if (difference.size() == 1 && subsets.add(subset)) {
register(
new AbstractConverter(subset.getCluster(), subset,
difference.get(0).getTraitDef(), root.getTraitSet()),
root);
}
}
}
public RelSubset register(
RelNode rel,
RelNode equivRel) {
assert !isRegistered(rel) : "pre: isRegistered(rel)";
final RelSet set;
if (equivRel == null) {
set = null;
} else {
assert RelOptUtil.equal(
"rel rowtype",
rel.getRowType(),
"equivRel rowtype",
equivRel.getRowType(),
Litmus.THROW);
set = getSet(equivRel);
}
return registerImpl(rel, set);
}
public RelSubset ensureRegistered(RelNode rel, RelNode equivRel) {
RelSubset result;
final RelSubset subset = getSubset(rel);
if (subset != null) {
if (equivRel != null) {
final RelSubset equivSubset = getSubset(equivRel);
if (subset.set != equivSubset.set) {
merge(equivSubset.set, subset.set);
}
}
result = subset;
} else {
result = register(rel, equivRel);
}
// Checking if tree is valid considerably slows down planning
// Only doing it if logger level is debug or finer
if (LOGGER.isDebugEnabled()) {
assert isValid(Litmus.THROW);
}
return result;
}
/**
* Checks internal consistency.
*/
protected boolean isValid(Litmus litmus) {
if (this.getRoot() == null) {
return true;
}
RelMetadataQuery metaQuery = this.getRoot().getCluster().getMetadataQuerySupplier().get();
for (RelSet set : allSets) {
if (set.equivalentSet != null) {
return litmus.fail("set [{}] has been merged: it should not be in the list", set);
}
for (RelSubset subset : set.subsets) {
if (subset.set != set) {
return litmus.fail("subset [{}] is in wrong set [{}]",
subset, set);
}
if (subset.best != null) {
// Make sure best RelNode is valid
if (!subset.set.rels.contains(subset.best)) {
return litmus.fail("RelSubset [{}] does not contain its best RelNode [{}]",
subset, subset.best);
}
// Make sure bestCost is up-to-date
try {
RelOptCost bestCost = getCost(subset.best, metaQuery);
if (!subset.bestCost.equals(bestCost)) {
return litmus.fail("RelSubset [" + subset
+ "] has wrong best cost "
+ subset.bestCost + ". Correct cost is " + bestCost);
}
} catch (CyclicMetadataException e) {
// ignore
}
}
for (RelNode rel : subset.getRels()) {
try {
RelOptCost relCost = getCost(rel, metaQuery);
if (relCost.isLt(subset.bestCost)) {
return litmus.fail("rel [{}] has lower cost {} than "
+ "best cost {} of subset [{}]",
rel, relCost, subset.bestCost, subset);
}
} catch (CyclicMetadataException e) {
// ignore
}
}
}
}
return litmus.succeed();
}
public void registerAbstractRelationalRules() {
RelOptUtil.registerAbstractRelationalRules(this);
}
public void registerSchema(RelOptSchema schema) {
if (registeredSchemas.add(schema)) {
try {
schema.registerRules(this);
} catch (Exception e) {
throw new AssertionError("While registering schema " + schema, e);
}
}
}
/**
* Sets whether this planner should consider rel nodes with Convention.NONE
* to have infinite cost or not.
* @param infinite Whether to make none convention rel nodes infinite cost
*/
public void setNoneConventionHasInfiniteCost(boolean infinite) {
this.noneConventionHasInfiniteCost = infinite;
}
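/**
* Computes the cost of a relational expression. For a {@link RelSubset} this
* is its current best cost; expressions in {@link Convention#NONE} are
* considered infinitely expensive when so configured; otherwise the result is
* the expression's non-cumulative cost (nudged up to a tiny positive cost if
* necessary) plus the cost of its inputs, computed recursively.
*/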
public RelOptCost getCost(RelNode rel, RelMetadataQuery mq) {
assert rel != null : "pre-condition: rel != null";
if (rel instanceof RelSubset) {
return ((RelSubset) rel).bestCost;
}
if (noneConventionHasInfiniteCost
&& rel.getTraitSet().getTrait(ConventionTraitDef.INSTANCE) == Convention.NONE) {
return costFactory.makeInfiniteCost();
}
RelOptCost cost = mq.getNonCumulativeCost(rel);
if (!zeroCost.isLt(cost)) {
// cost must be positive, so nudge it
cost = costFactory.makeTinyCost();
}
for (RelNode input : rel.getInputs()) {
cost = cost.plus(getCost(input, mq));
}
return cost;
}
/**
* Returns the subset that a relational expression belongs to.
*
* @param rel Relational expression
* @return Subset it belongs to, or null if it is not registered
*/
public RelSubset getSubset(RelNode rel) {
assert rel != null : "pre: rel != null";
if (rel instanceof RelSubset) {
return (RelSubset) rel;
} else {
return mapRel2Subset.get(rel);
}
}
public RelSubset getSubset(
RelNode rel,
RelTraitSet traits) {
return getSubset(rel, traits, false);
}
public RelSubset getSubset(
RelNode rel,
RelTraitSet traits,
boolean createIfMissing) {
if ((rel instanceof RelSubset) && (rel.getTraitSet().equals(traits))) {
return (RelSubset) rel;
}
RelSet set = getSet(rel);
if (set == null) {
return null;
}
if (createIfMissing) {
return set.getOrCreateSubset(rel.getCluster(), traits);
}
return set.getSubset(traits);
}
boolean isSeedNode(RelNode node) {
final RelSet set = getSubset(node).set;
return set.seeds.contains(node);
}
RelNode changeTraitsUsingConverters(
RelNode rel,
RelTraitSet toTraits) {
final RelTraitSet fromTraits = rel.getTraitSet();
assert fromTraits.size() >= toTraits.size();
final boolean allowInfiniteCostConverters =
CalciteSystemProperty.ALLOW_INFINITE_COST_CONVERTERS.value();
// Traits may build on top of another...for example a collation trait
// would typically come after a distribution trait since distribution
// destroys collation; so when doing the conversion below we use
// fromTraits as the trait of the just previously converted RelNode.
// Also, toTraits may have fewer traits than fromTraits, excess traits
// will be left as is. Finally, any null entries in toTraits are
// ignored.
RelNode converted = rel;
for (int i = 0; (converted != null) && (i < toTraits.size()); i++) {
RelTrait fromTrait = converted.getTraitSet().getTrait(i);
final RelTraitDef traitDef = fromTrait.getTraitDef();
RelTrait toTrait = toTraits.getTrait(i);
if (toTrait == null) {
continue;
}
assert traitDef == toTrait.getTraitDef();
if (fromTrait.equals(toTrait)) {
// No need to convert; it's already correct.
continue;
}
rel =
traitDef.convert(
this,
converted,
toTrait,
allowInfiniteCostConverters);
if (rel != null) {
assert rel.getTraitSet().getTrait(traitDef).satisfies(toTrait);
register(rel, converted);
}
converted = rel;
}
// make sure final converted traitset subsumes what was required
if (converted != null) {
assert converted.getTraitSet().satisfies(toTraits);
}
return converted;
}
@Deprecated // to be removed before 1.24
public void setImportance(RelNode rel, double importance) {
assert rel != null;
if (importance == 0d) {
prunedNodes.add(rel);
}
}
@Override public void prune(RelNode rel) {
prunedNodes.add(rel);
}
/**
* Dumps the internal state of this VolcanoPlanner to a writer.
*
* @param pw Print writer
* @see #normalizePlan(String)
*/
public void dump(PrintWriter pw) {
pw.println("Root: " + root);
pw.println("Original rel:");
if (originalRoot != null) {
originalRoot.explain(
new RelWriterImpl(pw, SqlExplainLevel.ALL_ATTRIBUTES, false));
}
try {
if (CalciteSystemProperty.DUMP_SETS.value()) {
pw.println();
pw.println("Sets:");
Dumpers.dumpSets(this, pw);
}
if (CalciteSystemProperty.DUMP_GRAPHVIZ.value()) {
pw.println();
pw.println("Graphviz:");
Dumpers.dumpGraphviz(this, pw);
}
} catch (Exception | AssertionError e) {
pw.println("Error when dumping plan state: \n"
+ e);
}
}
public String toDot() {
StringWriter sw = new StringWriter();
PrintWriter pw = new PrintWriter(sw);
Dumpers.dumpGraphviz(this, pw);
pw.flush();
return sw.toString();
}
/**
* Re-computes the digest of a {@link RelNode}.
*
* Since a relational expression's digest contains the identifiers of its
* children, this method needs to be called when the child has been renamed,
* for example if the child's set merges with another.
*
* @param rel Relational expression
*/
void rename(RelNode rel) {
final String oldDigest = rel.getDigest();
if (fixUpInputs(rel)) {
final Pair<String, List<RelDataType>> oldKey = key(oldDigest, rel.getRowType());
final RelNode removed = mapDigestToRel.remove(oldKey);
assert removed == rel;
final String newDigest = rel.recomputeDigest();
LOGGER.trace("Rename #{} from '{}' to '{}'", rel.getId(), oldDigest, newDigest);
final Pair<String, List<RelDataType>> key = key(rel);
final RelNode equivRel = mapDigestToRel.put(key, rel);
if (equivRel != null) {
assert equivRel != rel;
// There's already an equivalent with the same name, and we
// just knocked it out. Put it back, and forget about 'rel'.
LOGGER.trace("After renaming rel#{} it is now equivalent to rel#{}",
rel.getId(), equivRel.getId());
mapDigestToRel.put(key, equivRel);
checkPruned(equivRel, rel);
RelSubset equivRelSubset = getSubset(equivRel);
// Remove back-links from children.
for (RelNode input : rel.getInputs()) {
((RelSubset) input).set.parents.remove(rel);
}
// Remove rel from its subset. (This may leave the subset
// empty, but if so, that will be dealt with when the sets
// get merged.)
final RelSubset subset = mapRel2Subset.put(rel, equivRelSubset);
assert subset != null;
boolean existed = subset.set.rels.remove(rel);
assert existed : "rel was not known to its set";
final RelSubset equivSubset = getSubset(equivRel);
for (RelSubset s : subset.set.subsets) {
if (s.best == rel) {
Set<RelSubset> activeSet = new HashSet<>();
s.best = equivRel;
// Propagate cost improvement since this potentially would change the subset's best cost
s.propagateCostImprovements(
this, equivRel.getCluster().getMetadataQuery(),
equivRel, activeSet);
}
}
if (equivSubset != subset) {
// The equivalent relational expression is in a different
// subset, therefore the sets are equivalent.
assert equivSubset.getTraitSet().equals(
subset.getTraitSet());
assert equivSubset.set != subset.set;
merge(equivSubset.set, subset.set);
}
}
}
}
/**
* Registers a {@link RelNode}, which has already been registered, in a new
* {@link RelSet}.
*
* @param set Set
* @param rel Relational expression
*/
void reregister(
RelSet set,
RelNode rel) {
// Is there an equivalent relational expression? (This might have
// just occurred because the relational expression's child was just
// found to be equivalent to another set.)
final Pair<String, List<RelDataType>> key = key(rel);
RelNode equivRel = mapDigestToRel.get(key);
if (equivRel != null && equivRel != rel) {
assert equivRel.getClass() == rel.getClass();
assert equivRel.getTraitSet().equals(rel.getTraitSet());
checkPruned(equivRel, rel);
return;
}
// Add the relational expression into the correct set and subset.
if (!prunedNodes.contains(rel)) {
addRelToSet(rel, set);
}
}
/**
* Prunes {@code rel} if an identical expression ({@code duplicateRel}) has
* already been pruned.
*/
private void checkPruned(RelNode rel, RelNode duplicateRel) {
if (prunedNodes.contains(duplicateRel)) {
prunedNodes.add(rel);
}
}
/**
* If a subset has one or more equivalent subsets (owing to a set having
* merged with another), returns the subset which is the leader of the
* equivalence class.
*
* @param subset Subset
* @return Leader of subset's equivalence class
*/
private RelSubset canonize(final RelSubset subset) {
if (subset.set.equivalentSet == null) {
return subset;
}
RelSet set = subset.set;
do {
set = set.equivalentSet;
} while (set.equivalentSet != null);
return set.getOrCreateSubset(
subset.getCluster(), subset.getTraitSet());
}
/**
* Fires all rules matched by a relational expression.
*
* @param rel Relational expression which has just been created (or maybe
* from the queue)
*/
void fireRules(RelNode rel) {
for (RelOptRuleOperand operand : classOperands.get(rel.getClass())) {
if (operand.matches(rel)) {
final VolcanoRuleCall ruleCall;
ruleCall = new DeferringRuleCall(this, operand);
ruleCall.match(rel);
}
}
}
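/**
* Replaces any input of {@code rel} that refers to a non-canonical subset
* with the canonical subset (adjusting parent links between the sets), clears
* the expression's cached metadata, and returns whether any input changed.
*/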
private boolean fixUpInputs(RelNode rel) {
List<RelNode> inputs = rel.getInputs();
int i = -1;
int changeCount = 0;
for (RelNode input : inputs) {
++i;
if (input instanceof RelSubset) {
final RelSubset subset = (RelSubset) input;
RelSubset newSubset = canonize(subset);
if (newSubset != subset) {
rel.replaceInput(i, newSubset);
if (subset.set != newSubset.set) {
subset.set.parents.remove(rel);
newSubset.set.parents.add(rel);
}
changeCount++;
}
}
}
RelMdUtil.clearCache(rel);
return changeCount > 0;
}
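/**
* Merges two equivalence sets and returns the set that survives. The sets are
* first resolved to the roots of their equivalence trees; the newer set (or
* the parent set, when one set is an input of the other) is merged into the
* other. If the root's set is merged away, the root subset is re-created in
* the surviving set and root converters are re-added.
*/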
private RelSet merge(RelSet set, RelSet set2) {
assert set != set2 : "pre: set != set2";
// Find the root of set2's equivalence tree.
set = equivRoot(set);
set2 = equivRoot(set2);
// Looks like set2 was already marked as equivalent to set. Nothing
// to do.
if (set2 == set) {
return set;
}
// If necessary, swap the sets, so we're always merging the newer set
// into the older or merging parent set into child set.
if (set2.getChildSets(this).contains(set)) {
// No-op
} else if (set.getChildSets(this).contains(set2)
|| set.id > set2.id) {
RelSet t = set;
set = set2;
set2 = t;
}
// Merge.
set.mergeWith(this, set2);
// Was the set we merged with the root? If so, the result is the new
// root.
if (set2 == getSet(root)) {
root =
set.getOrCreateSubset(
root.getCluster(),
root.getTraitSet());
ensureRootConverters();
}
return set;
}
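/**
* Walks up the {@code equivalentSet} chain to find the representative (root)
* set of an equivalence tree. A second pointer advancing two links at a time
* detects cycles in the chain.
*/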
static RelSet equivRoot(RelSet s) {
RelSet p = s; // iterates at twice the rate, to detect cycles
while (s.equivalentSet != null) {
p = forward2(s, p);
s = s.equivalentSet;
}
return s;
}
/** Moves forward two links, checking for a cycle at each. */
private static RelSet forward2(RelSet s, RelSet p) {
p = forward1(s, p);
p = forward1(s, p);
return p;
}
/** Moves forward one link, checking for a cycle. */
private static RelSet forward1(RelSet s, RelSet p) {
if (p != null) {
p = p.equivalentSet;
if (p == s) {
throw new AssertionError("cycle in equivalence tree");
}
}
return p;
}
/**
* Registers a new expression <code>exp</code> and queues up rule matches.
* If <code>set</code> is not null, makes the expression part of that
* equivalence set. If an identical expression is already registered, we
* don't need to register this one and nor should we queue up rule matches.
*
* @param rel relational expression to register. Must be either a
* {@link RelSubset}, or an unregistered {@link RelNode}
* @param set set that rel belongs to, or null
* @return the equivalence-set
*/
private RelSubset registerImpl(
RelNode rel,
RelSet set) {
if (rel instanceof RelSubset) {
return registerSubset(set, (RelSubset) rel);
}
assert !isRegistered(rel) : "already been registered: " + rel;
if (rel.getCluster().getPlanner() != this) {
throw new AssertionError("Relational expression " + rel
+ " belongs to a different planner than is currently being used.");
}
// Now is a good time to ensure that the relational expression
// implements the interface required by its calling convention.
final RelTraitSet traits = rel.getTraitSet();
final Convention convention = traits.getTrait(ConventionTraitDef.INSTANCE);
assert convention != null;
if (!convention.getInterface().isInstance(rel)
&& !(rel instanceof Converter)) {
throw new AssertionError("Relational expression " + rel
+ " has calling-convention " + convention
+ " but does not implement the required interface '"
+ convention.getInterface() + "' of that convention");
}
if (traits.size() != traitDefs.size()) {
throw new AssertionError("Relational expression " + rel
+ " does not have the correct number of traits: " + traits.size()
+ " != " + traitDefs.size());
}
// Ensure that its sub-expressions are registered.
rel = rel.onRegister(this);
// Record its provenance. (Rule call may be null.)
if (ruleCallStack.isEmpty()) {
provenanceMap.put(rel, Provenance.EMPTY);
} else {
final VolcanoRuleCall ruleCall = ruleCallStack.peek();
provenanceMap.put(
rel,
new RuleProvenance(
ruleCall.rule,
ImmutableList.copyOf(ruleCall.rels),
ruleCall.id));
}
// If it is equivalent to an existing expression, return the set that
// the equivalent expression belongs to.
Pair<String, List<RelDataType>> key = key(rel);
RelNode equivExp = mapDigestToRel.get(key);
if (equivExp == null) {
// do nothing
} else if (equivExp == rel) {
return getSubset(rel);
} else {
assert RelOptUtil.equal(
"left", equivExp.getRowType(),
"right", rel.getRowType(),
Litmus.THROW);
checkPruned(equivExp, rel);
RelSet equivSet = getSet(equivExp);
if (equivSet != null) {
LOGGER.trace(
"Register: rel#{} is equivalent to {}", rel.getId(), equivExp);
return registerSubset(set, getSubset(equivExp));
}
}
// Converters are in the same set as their children.
if (rel instanceof Converter) {
final RelNode input = ((Converter) rel).getInput();
final RelSet childSet = getSet(input);
if ((set != null)
&& (set != childSet)
&& (set.equivalentSet == null)) {
LOGGER.trace(
"Register #{} {} (and merge sets, because it is a conversion)",
rel.getId(), rel.getDigest());
merge(set, childSet);
// During the mergers, the child set may have changed, and since
// we're not registered yet, we won't have been informed. So
// check whether we are now equivalent to an existing
// expression.
if (fixUpInputs(rel)) {
rel.recomputeDigest();
key = key(rel);
RelNode equivRel = mapDigestToRel.get(key);
if ((equivRel != rel) && (equivRel != null)) {
// make sure this bad rel didn't get into the
// set in any way (fixupInputs will do this but it
// doesn't know if it should so it does it anyway)
set.obliterateRelNode(rel);
// There is already an equivalent expression. Use that
// one, and forget about this one.
return getSubset(equivRel);
}
}
} else {
set = childSet;
}
}
// Place the expression in the appropriate equivalence set.
if (set == null) {
set = new RelSet(
nextSetId++,
Util.minus(
RelOptUtil.getVariablesSet(rel),
rel.getVariablesSet()),
RelOptUtil.getVariablesUsed(rel));
this.allSets.add(set);
}
// Chain to find 'live' equivalent set, just in case several sets are
// merging at the same time.
while (set.equivalentSet != null) {
set = set.equivalentSet;
}
// Allow each rel to register its own rules.
registerClass(rel);
final int subsetBeforeCount = set.subsets.size();
RelSubset subset = addRelToSet(rel, set);
final RelNode xx = mapDigestToRel.put(key, rel);
assert xx == null || xx == rel : rel.getDigest();
LOGGER.trace("Register {} in {}", rel, subset);
// This relational expression may have been registered while we
// recursively registered its children. If this is the case, we're done.
if (xx != null) {
return subset;
}
for (RelNode input : rel.getInputs()) {
RelSubset childSubset = (RelSubset) input;
childSubset.set.parents.add(rel);
}
// Queue up all rules triggered by this relexp's creation.
fireRules(rel);
// It's a new subset.
if (set.subsets.size() > subsetBeforeCount
|| subset.triggerRule) {
fireRules(subset);
}
return subset;
}
private RelSubset addRelToSet(RelNode rel, RelSet set) {
RelSubset subset = set.add(rel);
mapRel2Subset.put(rel, subset);
// While a tree of RelNodes is being registered, sometimes nodes' costs
// improve and the subset doesn't hear about it. You can end up with
// a subset with a single rel of cost 99 which thinks its best cost is
// 100. We think this happens because the back-links to parents are
// not established. So, give the subset another chance to figure out
// its cost.
final RelMetadataQuery mq = rel.getCluster().getMetadataQuery();
try {
subset.propagateCostImprovements(this, mq, rel, new HashSet<>());
} catch (CyclicMetadataException e) {
// ignore
}
return subset;
}
private RelSubset registerSubset(
RelSet set,
RelSubset subset) {
if ((set != subset.set)
&& (set != null)
&& (set.equivalentSet == null)) {
LOGGER.trace("Register #{} {}, and merge sets", subset.getId(), subset);
merge(set, subset.set);
}
return subset;
}
// implement RelOptPlanner
public void registerMetadataProviders(List<RelMetadataProvider> list) {
list.add(0, new VolcanoRelMetadataProvider());
}
// implement RelOptPlanner
public long getRelMetadataTimestamp(RelNode rel) {
RelSubset subset = getSubset(rel);
if (subset == null) {
return 0;
} else {
return subset.timestamp;
}
}
/**
* Normalizes references to subsets within the string representation of a
* plan.
*
* This is useful when writing tests: it helps to ensure that tests don't
* break when an extra rule is introduced that generates a new subset and
* causes subsequent subset numbers to be off by one.
*
* <p>For example,
*
*
* FennelAggRel.FENNEL_EXEC(child=Subset#17.FENNEL_EXEC,groupCount=1,
* EXPR$1=COUNT())
* FennelSortRel.FENNEL_EXEC(child=Subset#2.FENNEL_EXEC,
* key=[0], discardDuplicates=false)
* FennelCalcRel.FENNEL_EXEC(
* child=Subset#4.FENNEL_EXEC, expr#0..8={inputs}, expr#9=3456,
* DEPTNO=$t7, $f0=$t9)
* MockTableImplRel.FENNEL_EXEC(
* table=[CATALOG, SALES, EMP])
*
* becomes
*
*
* FennelAggRel.FENNEL_EXEC(child=Subset#{0}.FENNEL_EXEC, groupCount=1,
* EXPR$1=COUNT())
* FennelSortRel.FENNEL_EXEC(child=Subset#{1}.FENNEL_EXEC,
* key=[0], discardDuplicates=false)
* FennelCalcRel.FENNEL_EXEC(
* child=Subset#{2}.FENNEL_EXEC,expr#0..8={inputs},expr#9=3456,DEPTNO=$t7,
* $f0=$t9)
* MockTableImplRel.FENNEL_EXEC(
* table=[CATALOG, SALES, EMP])
*
* @param plan Plan
* @return Normalized plan
*/
public static String normalizePlan(String plan) {
if (plan == null) {
return null;
}
final Pattern poundDigits = Pattern.compile("Subset#[0-9]+\\.");
int i = 0;
while (true) {
final Matcher matcher = poundDigits.matcher(plan);
if (!matcher.find()) {
return plan;
}
final String token = matcher.group(); // e.g. "Subset#23."
plan = plan.replace(token, "Subset#{" + i++ + "}.");
}
}
/**
* Sets whether this planner is locked. A locked planner does not accept
* new rules. {@link #addRule(com.hazelcast.org.apache.calcite.plan.RelOptRule)} will do
* nothing and return false.
*
* @param locked Whether planner is locked
*/
public void setLocked(boolean locked) {
this.locked = locked;
}
//~ Inner Classes ----------------------------------------------------------
/**
* A rule call which defers its actions. Whereas {@link RelOptRuleCall}
* invokes the rule when it finds a match, a DeferringRuleCall
* creates a {@link VolcanoRuleMatch} which can be invoked later.
*/
private static class DeferringRuleCall extends VolcanoRuleCall {
DeferringRuleCall(
VolcanoPlanner planner,
RelOptRuleOperand operand) {
super(planner, operand);
}
/**
* Rather than invoking the rule (as the base method does), creates a
* {@link VolcanoRuleMatch} which can be invoked later.
*/
protected void onMatch() {
final VolcanoRuleMatch match =
new VolcanoRuleMatch(
volcanoPlanner,
getOperand0(),
rels,
nodeInputs);
volcanoPlanner.ruleQueue.addMatch(match);
}
}
/**
* Where a RelNode came from.
*/
abstract static class Provenance {
public static final Provenance EMPTY = new UnknownProvenance();
}
/**
* We do not know where this RelNode came from. Probably created by hand,
* or by sql-to-rel converter.
*/
private static class UnknownProvenance extends Provenance {
}
/**
* A RelNode that came directly from another RelNode via a copy.
*/
static class DirectProvenance extends Provenance {
final RelNode source;
DirectProvenance(RelNode source) {
this.source = source;
}
}
/**
* A RelNode that came via the firing of a rule.
*/
static class RuleProvenance extends Provenance {
final RelOptRule rule;
final ImmutableList<RelNode> rels;
final int callId;
RuleProvenance(RelOptRule rule, ImmutableList<RelNode> rels, int callId) {
this.rule = rule;
this.rels = rels;
this.callId = callId;
}
}
}