/*
 * Copyright 2024 Hazelcast Inc.
 *
 * Licensed under the Hazelcast Community License (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://hazelcast.com/hazelcast-community-license
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.hazelcast.jet.sql.impl;

import com.hazelcast.shaded.com.google.common.collect.ImmutableList;
import com.hazelcast.jet.sql.impl.opt.cost.CostFactory;
import com.hazelcast.jet.sql.impl.opt.metadata.HazelcastRelMdBoundedness;
import com.hazelcast.jet.sql.impl.opt.metadata.HazelcastRelMdPrunability;
import com.hazelcast.jet.sql.impl.opt.metadata.HazelcastRelMdRowCount;
import com.hazelcast.jet.sql.impl.opt.metadata.HazelcastRelMdWatermarkedFields;
import com.hazelcast.jet.sql.impl.parse.QueryConvertResult;
import com.hazelcast.jet.sql.impl.parse.QueryConverter;
import com.hazelcast.jet.sql.impl.parse.QueryParseResult;
import com.hazelcast.jet.sql.impl.parse.QueryParser;
import com.hazelcast.jet.sql.impl.schema.HazelcastCalciteCatalogReader;
import com.hazelcast.jet.sql.impl.schema.HazelcastSchema;
import com.hazelcast.jet.sql.impl.schema.HazelcastSchemaUtils;
import com.hazelcast.jet.sql.impl.validate.HazelcastSqlValidator;
import com.hazelcast.jet.sql.impl.validate.types.HazelcastTypeFactory;
import com.hazelcast.logging.ILogger;
import com.hazelcast.sql.impl.QueryParameterMetadata;
import com.hazelcast.sql.impl.optimizer.PlanObjectKey;
import com.hazelcast.sql.impl.schema.IMapResolver;
import com.hazelcast.sql.impl.schema.SqlCatalog;
import com.hazelcast.sql.impl.security.SqlSecurityContext;
import com.hazelcast.shaded.org.apache.calcite.config.CalciteConnectionConfig;
import com.hazelcast.shaded.org.apache.calcite.jdbc.HazelcastRootCalciteSchema;
import com.hazelcast.shaded.org.apache.calcite.plan.Contexts;
import com.hazelcast.shaded.org.apache.calcite.plan.ConventionTraitDef;
import com.hazelcast.shaded.org.apache.calcite.plan.HazelcastRelOptCluster;
import com.hazelcast.shaded.org.apache.calcite.plan.RelTraitSet;
import com.hazelcast.shaded.org.apache.calcite.plan.hep.HepProgram;
import com.hazelcast.shaded.org.apache.calcite.plan.volcano.VolcanoPlanner;
import com.hazelcast.shaded.org.apache.calcite.prepare.Prepare;
import com.hazelcast.shaded.org.apache.calcite.rel.RelCollationTraitDef;
import com.hazelcast.shaded.org.apache.calcite.rel.RelNode;
import com.hazelcast.shaded.org.apache.calcite.rel.metadata.ChainedRelMetadataProvider;
import com.hazelcast.shaded.org.apache.calcite.rel.metadata.DefaultRelMetadataProvider;
import com.hazelcast.shaded.org.apache.calcite.rel.metadata.JaninoRelMetadataProvider;
import com.hazelcast.shaded.org.apache.calcite.rel.metadata.RelMetadataProvider;
import com.hazelcast.shaded.org.apache.calcite.sql.SqlNode;
import com.hazelcast.shaded.org.apache.calcite.tools.RuleSet;

import java.io.PrintWriter;
import java.io.StringWriter;
import java.util.ArrayDeque;
import java.util.Deque;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

/**
 * Optimizer context that holds the whole environment for a single optimization session.
 * It should not be reused across optimization sessions.
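 * <p>
 * A rough sketch of a typical session. Caller-side names such as {@code catalog}, {@code rules}
 * and {@code traitSet} are illustrative only, and the accessor names on the parse/convert
 * results are assumed rather than guaranteed:
 * <pre>{@code
 * OptimizerContext context = OptimizerContext.create(
 *         catalog, searchPaths, arguments, resolver, subqueryRewriterProgram, securityContext);
 * OptimizerContext.setThreadContext(context);
 *
 * QueryParseResult parseResult = context.parse(sql);
 * QueryConvertResult convertResult = context.convert(parseResult.getNode());
 * RelNode optimized = context.optimize(convertResult.getRel(), rules, traitSet);
 * }</pre>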
 */
@SuppressWarnings({"checkstyle:ClassDataAbstractionCoupling", "checkstyle:ClassFanOutComplexity"})
public final class OptimizerContext {

    private static final ThreadLocal<OptimizerContext> THREAD_CONTEXT = new ThreadLocal<>();

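    // Hazelcast-specific metadata handlers (row count, boundedness, watermarked fields,
    // prunability) chained in front of Calcite's default provider so they are consulted first.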
    private static final RelMetadataProvider METADATA_PROVIDER = ChainedRelMetadataProvider.of(ImmutableList.of(
            HazelcastRelMdRowCount.SOURCE,
            HazelcastRelMdBoundedness.SOURCE,
            HazelcastRelMdWatermarkedFields.SOURCE,
            HazelcastRelMdPrunability.SOURCE,
            DefaultRelMetadataProvider.INSTANCE
    ));

    private static final CalciteConnectionConfig CONNECTION_CONFIG = CalciteConfiguration.DEFAULT.toConnectionConfig();

    private final HazelcastRelOptCluster cluster;
    private final QueryParser parser;
    private final QueryConverter converter;
    private final QueryPlanner planner;
    private final Set<PlanObjectKey> usedViews = new HashSet<>();
    private final Deque<String> viewExpansionStack = new ArrayDeque<>();

    private OptimizerContext(
            HazelcastRelOptCluster cluster,
            QueryParser parser,
            QueryConverter converter,
            QueryPlanner planner
    ) {
        this.cluster = cluster;
        this.parser = parser;
        this.converter = converter;
        this.planner = planner;
    }

    /**
     * Create the optimization context.
     *
     * @param schema                  SQL catalog with the resolved tables.
     * @param searchPaths             Search paths to support the "current schema" feature.
     * @param arguments               Query argument values.
     * @param iMapResolver            Resolver for IMap metadata.
     * @param subqueryRewriterProgram HEP program used to rewrite subqueries during conversion.
     * @param securityContext         Security context for the current session.
     * @return Optimizer context.
     */
    public static OptimizerContext create(
            SqlCatalog schema,
            List<List<String>> searchPaths,
            List<Object> arguments,
            IMapResolver iMapResolver,
            HepProgram subqueryRewriterProgram,
            SqlSecurityContext securityContext
    ) {
        // Resolve tables.
        HazelcastSchema rootSchema = HazelcastSchemaUtils.createRootSchema(schema);

        return create(
                rootSchema,
                searchPaths,
                arguments,
                iMapResolver,
                subqueryRewriterProgram,
                securityContext);
    }

    public static OptimizerContext create(
            HazelcastSchema rootSchema,
            List<List<String>> schemaPaths,
            List<Object> arguments,
            IMapResolver iMapResolver,
            HepProgram subqueryRewriterProgram,
            SqlSecurityContext ssc
    ) {
        Prepare.CatalogReader catalogReader = createCatalogReader(rootSchema, schemaPaths);
        HazelcastSqlValidator validator = new HazelcastSqlValidator(catalogReader, arguments, iMapResolver, ssc);
        VolcanoPlanner volcanoPlanner = createPlanner();

        HazelcastRelOptCluster cluster = createCluster(volcanoPlanner, ssc);

        QueryParser parser = new QueryParser(validator);
        QueryConverter converter = new QueryConverter(validator, catalogReader, cluster, subqueryRewriterProgram);
        QueryPlanner planner = new QueryPlanner(volcanoPlanner);

        return new OptimizerContext(cluster, parser, converter, planner);
    }

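    /**
     * Bind the given context to the current thread so that it can later be retrieved
     * via {@link #getThreadContext()}.
     */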
    public static void setThreadContext(OptimizerContext context) {
        THREAD_CONTEXT.set(context);
    }

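    /**
     * Return the context previously bound to the current thread, or {@code null} if none was set.
     */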
    public static OptimizerContext getThreadContext() {
        return THREAD_CONTEXT.get();
    }

    /**
     * Parse SQL statement.
     *
     * @param sql SQL string.
     * @return Parse result containing the SQL tree.
     */
    public QueryParseResult parse(String sql) {
        return parser.parse(sql, cluster.getSecurityContext());
    }

    /**
     * Perform initial conversion of an SQL tree to a relational tree.
     *
     * @param node Parsed SQL tree.
     * @return Conversion result containing the relational tree.
     */
    public QueryConvertResult convert(SqlNode node) {
        return converter.convert(node);
    }

    public RelNode convertView(SqlNode node) {
        return converter.convertView(node);
    }

    /**
     * Apply the given rules to the node.
     *
     * @param node     Relational node to optimize.
     * @param rules    Rules to apply.
     * @param traitSet Required trait set.
     * @return Optimized node.
     */
    public RelNode optimize(RelNode node, RuleSet rules, RelTraitSet traitSet) {
        return planner.optimize(node, rules, traitSet);
    }

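    /** Store the parameter metadata of the current query on the underlying cluster. */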
    public void setParameterMetadata(QueryParameterMetadata parameterMetadata) {
        cluster.setParameterMetadata(parameterMetadata);
    }

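    /** Propagate the "requires job" flag for the current statement to the underlying cluster. */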
    public void setRequiresJob(boolean requiresJob) {
        cluster.setRequiresJob(requiresJob);
    }

    private static Prepare.CatalogReader createCatalogReader(HazelcastSchema rootSchema, List<List<String>> searchPaths) {
        assert searchPaths != null;

        return new HazelcastCalciteCatalogReader(
                new HazelcastRootCalciteSchema(rootSchema),
                searchPaths,
                HazelcastTypeFactory.INSTANCE,
                CONNECTION_CONFIG);
    }

    private static VolcanoPlanner createPlanner() {
        VolcanoPlanner planner = new VolcanoPlanner(
                CostFactory.INSTANCE,
                Contexts.of(CONNECTION_CONFIG)
        );

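        // Start from a clean slate and register only the trait definitions used here:
        // calling convention and collation.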
        planner.clearRelTraitDefs();
        planner.addRelTraitDef(ConventionTraitDef.INSTANCE);
        planner.addRelTraitDef(RelCollationTraitDef.INSTANCE);

        return planner;
    }

    private static HazelcastRelOptCluster createCluster(
            VolcanoPlanner planner,
            SqlSecurityContext securityContext) {
        HazelcastRelOptCluster cluster = HazelcastRelOptCluster.create(
                planner,
                HazelcastRexBuilder.INSTANCE,
                securityContext);

        // Wire up custom metadata providers.
        cluster.setMetadataProvider(JaninoRelMetadataProvider.of(METADATA_PROVIDER));

        return cluster;
    }

    public Deque<String> getViewExpansionStack() {
        return viewExpansionStack;
    }

    public Set<PlanObjectKey> getUsedViews() {
        return usedViews;
    }

    public SqlSecurityContext getSecurityContext() {
        return cluster.getSecurityContext();
    }

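    /** Dump the internal state of the planner to the given logger at INFO level. */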
    public void dump(ILogger logger) {
        StringWriter sw = new StringWriter();
        final PrintWriter pw = new PrintWriter(sw);
        planner.dump(pw);
        pw.flush();
        logger.info(sw.toString());
    }
}