/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.apache.druid.sql.calcite.planner;

import com.google.common.base.Supplier;
import com.google.common.base.Suppliers;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
import com.google.common.primitives.Ints;
import org.apache.calcite.DataContext;
import org.apache.calcite.adapter.java.JavaTypeFactory;
import org.apache.calcite.interpreter.BindableConvention;
import org.apache.calcite.interpreter.BindableRel;
import org.apache.calcite.interpreter.Bindables;
import org.apache.calcite.linq4j.Enumerable;
import org.apache.calcite.linq4j.Enumerator;
import org.apache.calcite.plan.RelOptPlanner;
import org.apache.calcite.plan.RelOptUtil;
import org.apache.calcite.rel.RelNode;
import org.apache.calcite.rel.RelRoot;
import org.apache.calcite.rel.core.Sort;
import org.apache.calcite.rel.logical.LogicalSort;
import org.apache.calcite.rel.type.RelDataTypeFactory;
import org.apache.calcite.rex.RexBuilder;
import org.apache.calcite.rex.RexNode;
import org.apache.calcite.sql.SqlExplain;
import org.apache.calcite.sql.SqlKind;
import org.apache.calcite.sql.SqlNode;
import org.apache.calcite.sql.parser.SqlParseException;
import org.apache.calcite.sql.type.BasicSqlType;
import org.apache.calcite.sql.type.SqlTypeName;
import org.apache.calcite.tools.Planner;
import org.apache.calcite.tools.RelConversionException;
import org.apache.calcite.tools.ValidationException;
import org.apache.calcite.util.Pair;
import org.apache.druid.java.util.common.guava.BaseSequence;
import org.apache.druid.java.util.common.guava.Sequence;
import org.apache.druid.java.util.common.guava.Sequences;
import org.apache.druid.segment.DimensionHandlerUtils;
import org.apache.druid.sql.calcite.rel.DruidConvention;
import org.apache.druid.sql.calcite.rel.DruidRel;

import javax.annotation.Nullable;
import java.io.Closeable;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Set;

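/**
 * SQL planner for Druid, built on Calcite. {@link #plan} parses and validates a SQL string, then attempts to plan it
 * into a native Druid query using {@link DruidConvention}; if the native planner throws
 * {@link RelOptPlanner.CannotPlanException}, planning is retried with Calcite's {@link BindableConvention}, which
 * handles {@code VALUES} and metadata-table queries.
 *
 * Illustrative usage (a sketch only; {@code plannerFactory.createPlanner(...)} is an assumed factory method, and the
 * exact way a DruidPlanner is obtained and its results are consumed depends on the Druid version):
 *
 * <pre>{@code
 * try (DruidPlanner planner = plannerFactory.createPlanner(queryContext)) {
 *   PlannerResult result = planner.plan("SELECT COUNT(*) FROM wikipedia");
 *   // run the planned query via the PlannerResult to obtain a Sequence<Object[]> of result rows
 * }
 * }</pre>
 *
 * Instances must be closed to release the underlying Calcite planner's resources.
 */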
public class DruidPlanner implements Closeable
{
  private final Planner planner;
  private final PlannerContext plannerContext;
  private RexBuilder rexBuilder;

  public DruidPlanner(
      final Planner planner,
      final PlannerContext plannerContext
  )
  {
    this.planner = planner;
    this.plannerContext = plannerContext;
  }

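  /**
   * Parses, validates, and plans a SQL string. {@code EXPLAIN} statements are unwrapped here so that the inner
   * statement is planned and its plan is returned as the result. Planning is first attempted with Druid's native
   * convention; if that fails with a {@link RelOptPlanner.CannotPlanException}, it is retried with the bindable
   * convention.
   *
   * @param sql SQL query text
   *
   * @return the planned query as a {@link PlannerResult}
   */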
  public PlannerResult plan(final String sql)
      throws SqlParseException, ValidationException, RelConversionException
  {
    SqlExplain explain = null;
    SqlNode parsed = planner.parse(sql);
    if (parsed.getKind() == SqlKind.EXPLAIN) {
      explain = (SqlExplain) parsed;
      parsed = explain.getExplicandum();
    }
    // the planner's type factory is not available until after parsing
    this.rexBuilder = new RexBuilder(planner.getTypeFactory());

    final SqlNode validated = planner.validate(parsed);
    final RelRoot root = planner.rel(validated);

    try {
      return planWithDruidConvention(explain, root);
    }
    catch (RelOptPlanner.CannotPlanException e) {
      // Try again with BINDABLE convention. Used for querying Values and metadata tables.
      try {
        return planWithBindableConvention(explain, root);
      }
      catch (Exception e2) {
        e.addSuppressed(e2);
        throw e;
      }
    }
  }

  public PlannerContext getPlannerContext()
  {
    return plannerContext;
  }

  @Override
  public void close()
  {
    planner.close();
  }

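  /**
   * Plans the validated query into a native Druid query using {@link DruidConvention}. If
   * {@link PlannerContext#CTX_SQL_OUTER_LIMIT} is set, the root is first wrapped with a limiting
   * {@link LogicalSort}. When the root's field references are not trivial, a mapping is applied on top of the query
   * results to project {@code root.fields}.
   */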
  private PlannerResult planWithDruidConvention(
      final SqlExplain explain,
      final RelRoot root
  ) throws RelConversionException
  {
    final RelNode possiblyWrappedRootRel = possiblyWrapRootWithOuterLimitFromContext(root);

    final DruidRel<?> druidRel = (DruidRel<?>) planner.transform(
        Rules.DRUID_CONVENTION_RULES,
        planner.getEmptyTraitSet()
               .replace(DruidConvention.instance())
               .plus(root.collation),
        possiblyWrappedRootRel
    );

    final Set<String> dataSourceNames = ImmutableSet.copyOf(druidRel.getDataSourceNames());

    if (explain != null) {
      return planExplanation(druidRel, explain, dataSourceNames);
    } else {
      final Supplier<Sequence<Object[]>> resultsSupplier = () -> {
        if (root.isRefTrivial()) {
          return druidRel.runQuery();
        } else {
          // Add a mapping on top to accommodate root.fields.
          return Sequences.map(
              druidRel.runQuery(),
              input -> {
                final Object[] retVal = new Object[root.fields.size()];
                for (int i = 0; i < root.fields.size(); i++) {
                  retVal[i] = input[root.fields.get(i).getKey()];
                }
                return retVal;
              }
          );
        }
      };

      return new PlannerResult(resultsSupplier, root.validatedRowType, dataSourceNames);
    }
  }

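  /**
   * Plans the validated query with Calcite's {@link BindableConvention}. This is the fallback path for queries the
   * native planner cannot handle, such as {@code VALUES} and metadata-table queries. The plan is executed by binding
   * it to a {@link DataContext}; the resulting enumerator is exposed as a {@link Sequence} that closes the
   * enumerator once the sequence has been fully read or closed.
   */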
  private PlannerResult planWithBindableConvention(
      final SqlExplain explain,
      final RelRoot root
  ) throws RelConversionException
  {
    BindableRel bindableRel = (BindableRel) planner.transform(
        Rules.BINDABLE_CONVENTION_RULES,
        planner.getEmptyTraitSet()
               .replace(BindableConvention.INSTANCE)
               .plus(root.collation),
        root.rel
    );

    if (!root.isRefTrivial()) {
      // Add a projection on top to accommodate root.fields.
      final List<RexNode> projects = new ArrayList<>();
      final RexBuilder rexBuilder = bindableRel.getCluster().getRexBuilder();
      for (int field : Pair.left(root.fields)) {
        projects.add(rexBuilder.makeInputRef(bindableRel, field));
      }
      bindableRel = new Bindables.BindableProject(
          bindableRel.getCluster(),
          bindableRel.getTraitSet(),
          bindableRel,
          projects,
          root.validatedRowType
      );
    }

    if (explain != null) {
      return planExplanation(bindableRel, explain, ImmutableSet.of());
    } else {
      final BindableRel theRel = bindableRel;
      final DataContext dataContext = plannerContext.createDataContext((JavaTypeFactory) planner.getTypeFactory());
      final Supplier<Sequence<Object[]>> resultsSupplier = () -> {
        final Enumerable<?> enumerable = theRel.bind(dataContext);
        final Enumerator<?> enumerator = enumerable.enumerator();
        return Sequences.withBaggage(new BaseSequence<>(
            new BaseSequence.IteratorMaker<Object[], EnumeratorIterator<Object[]>>()
            {
              @Override
              public EnumeratorIterator<Object[]> make()
              {
                return new EnumeratorIterator<>(new Iterator<Object[]>()
                {
                  @Override
                  public boolean hasNext()
                  {
                    return enumerator.moveNext();
                  }

                  @Override
                  public Object[] next()
                  {
                    return (Object[]) enumerator.current();
                  }
                });
              }

              @Override
              public void cleanup(EnumeratorIterator<Object[]> iterFromMake)
              {

              }
            }
        ), enumerator::close);
      };
      return new PlannerResult(resultsSupplier, root.validatedRowType, ImmutableSet.of());
    }
  }

  /**
   * This method wraps the root with a {@link LogicalSort} that applies a limit (no ordering change). If the outer rel
   * is already a {@link Sort}, we can merge our outerLimit into it, similar to what is going on in
   * {@link org.apache.druid.sql.calcite.rule.SortCollapseRule}.
   *
   * The {@link PlannerContext#CTX_SQL_OUTER_LIMIT} flag that controls this wrapping is meant for internal use only by
   * the web console, allowing it to apply a limit to queries without rewriting the original SQL.
   *
   * @param root root node
   * @return root node wrapped with a limiting logical sort if a limit is specified in the query context.
   */
  @Nullable
  private RelNode possiblyWrapRootWithOuterLimitFromContext(
      RelRoot root
  )
  {
    Object outerLimitObj = plannerContext.getQueryContext().get(PlannerContext.CTX_SQL_OUTER_LIMIT);
    Long outerLimit = DimensionHandlerUtils.convertObjectToLong(outerLimitObj, true);
    if (outerLimit == null) {
      return root.rel;
    }

    if (root.rel instanceof Sort) {
      Sort innerSort = (Sort) root.rel;
      final int offset = Calcites.getOffset(innerSort);
      final int innerLimit = Calcites.getFetch(innerSort);
      final int fetch = Calcites.collapseFetch(
          innerLimit,
          Ints.checkedCast(outerLimit),
          0
      );

      if (fetch == innerLimit) {
        // nothing to do, don't bother to make a new sort
        return root.rel;
      }

      return LogicalSort.create(
          innerSort.getInput(),
          innerSort.collation,
          offset > 0 ? makeBigIntLiteral(offset) : null,
          makeBigIntLiteral(fetch)
      );
    }
    return LogicalSort.create(
        root.rel,
        root.collation,
        null,
        makeBigIntLiteral(outerLimit)
    );
  }

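  /**
   * Creates a BIGINT literal for the offset or fetch of a {@link LogicalSort}. Uses {@link #rexBuilder}, which is
   * only initialized once {@link #plan} has parsed the query.
   */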
  private RexNode makeBigIntLiteral(long value)
  {
    return rexBuilder.makeLiteral(
        value,
        new BasicSqlType(DruidTypeSystem.INSTANCE, SqlTypeName.BIGINT),
        false
    );
  }

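  /**
   * Thin {@link Iterator} delegate used by {@link #planWithBindableConvention} to feed rows pulled from a Calcite
   * {@link Enumerator} into a {@link BaseSequence}.
   */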
  private static class EnumeratorIterator<T> implements Iterator<T>
  {
    private final Iterator<T> it;

    EnumeratorIterator(Iterator<T> it)
    {
      this.it = it;
    }

    @Override
    public boolean hasNext()
    {
      return it.hasNext();
    }

    @Override
    public T next()
    {
      return it.next();
    }
  }

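  /**
   * Plans an {@code EXPLAIN} of an already-planned rel by dumping its plan as a single-row result with one VARCHAR
   * column named {@code PLAN}.
   */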
  private PlannerResult planExplanation(
      final RelNode rel,
      final SqlExplain explain,
      final Set<String> datasourceNames
  )
  {
    final String explanation = RelOptUtil.dumpPlan("", rel, explain.getFormat(), explain.getDetailLevel());
    final Supplier<Sequence<Object[]>> resultsSupplier = Suppliers.ofInstance(
        Sequences.simple(ImmutableList.of(new Object[]{explanation})));
    final RelDataTypeFactory typeFactory = rel.getCluster().getTypeFactory();
    return new PlannerResult(
        resultsSupplier,
        typeFactory.createStructType(
            ImmutableList.of(Calcites.createSqlType(typeFactory, SqlTypeName.VARCHAR)),
            ImmutableList.of("PLAN")
        ),
        datasourceNames
    );
  }
}