/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/
package org.elasticsearch.xpack.esql.planner;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.compute.Describable;
import org.elasticsearch.compute.data.Block;
import org.elasticsearch.compute.data.BlockFactory;
import org.elasticsearch.compute.data.ElementType;
import org.elasticsearch.compute.data.LocalCircuitBreaker;
import org.elasticsearch.compute.data.Page;
import org.elasticsearch.compute.lucene.LuceneOperator;
import org.elasticsearch.compute.operator.ColumnExtractOperator;
import org.elasticsearch.compute.operator.ColumnLoadOperator;
import org.elasticsearch.compute.operator.Driver;
import org.elasticsearch.compute.operator.DriverContext;
import org.elasticsearch.compute.operator.EvalOperator.EvalOperatorFactory;
import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator;
import org.elasticsearch.compute.operator.FilterOperator.FilterOperatorFactory;
import org.elasticsearch.compute.operator.LocalSourceOperator;
import org.elasticsearch.compute.operator.LocalSourceOperator.LocalSourceFactory;
import org.elasticsearch.compute.operator.MvExpandOperator;
import org.elasticsearch.compute.operator.Operator;
import org.elasticsearch.compute.operator.Operator.OperatorFactory;
import org.elasticsearch.compute.operator.OutputOperator.OutputOperatorFactory;
import org.elasticsearch.compute.operator.RowInTableLookupOperator;
import org.elasticsearch.compute.operator.RowOperator.RowOperatorFactory;
import org.elasticsearch.compute.operator.ShowOperator;
import org.elasticsearch.compute.operator.SinkOperator;
import org.elasticsearch.compute.operator.SinkOperator.SinkOperatorFactory;
import org.elasticsearch.compute.operator.SourceOperator;
import org.elasticsearch.compute.operator.SourceOperator.SourceOperatorFactory;
import org.elasticsearch.compute.operator.StringExtractOperator;
import org.elasticsearch.compute.operator.exchange.ExchangeSinkHandler;
import org.elasticsearch.compute.operator.exchange.ExchangeSinkOperator.ExchangeSinkOperatorFactory;
import org.elasticsearch.compute.operator.exchange.ExchangeSourceHandler;
import org.elasticsearch.compute.operator.exchange.ExchangeSourceOperator.ExchangeSourceOperatorFactory;
import org.elasticsearch.compute.operator.topn.TopNEncoder;
import org.elasticsearch.compute.operator.topn.TopNOperator;
import org.elasticsearch.compute.operator.topn.TopNOperator.TopNOperatorFactory;
import org.elasticsearch.core.Releasables;
import org.elasticsearch.core.TimeValue;
import org.elasticsearch.logging.LogManager;
import org.elasticsearch.logging.Logger;
import org.elasticsearch.tasks.CancellableTask;
import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException;
import org.elasticsearch.xpack.esql.core.expression.Alias;
import org.elasticsearch.xpack.esql.core.expression.Attribute;
import org.elasticsearch.xpack.esql.core.expression.Expression;
import org.elasticsearch.xpack.esql.core.expression.Expressions;
import org.elasticsearch.xpack.esql.core.expression.Literal;
import org.elasticsearch.xpack.esql.core.expression.NameId;
import org.elasticsearch.xpack.esql.core.expression.NamedExpression;
import org.elasticsearch.xpack.esql.core.expression.Order;
import org.elasticsearch.xpack.esql.core.tree.Source;
import org.elasticsearch.xpack.esql.core.type.DataType;
import org.elasticsearch.xpack.esql.core.util.Holder;
import org.elasticsearch.xpack.esql.enrich.EnrichLookupOperator;
import org.elasticsearch.xpack.esql.enrich.EnrichLookupService;
import org.elasticsearch.xpack.esql.evaluator.EvalMapper;
import org.elasticsearch.xpack.esql.evaluator.command.GrokEvaluatorExtracter;
import org.elasticsearch.xpack.esql.plan.physical.AggregateExec;
import org.elasticsearch.xpack.esql.plan.physical.DissectExec;
import org.elasticsearch.xpack.esql.plan.physical.EnrichExec;
import org.elasticsearch.xpack.esql.plan.physical.EsQueryExec;
import org.elasticsearch.xpack.esql.plan.physical.EsStatsQueryExec;
import org.elasticsearch.xpack.esql.plan.physical.EvalExec;
import org.elasticsearch.xpack.esql.plan.physical.ExchangeExec;
import org.elasticsearch.xpack.esql.plan.physical.ExchangeSinkExec;
import org.elasticsearch.xpack.esql.plan.physical.ExchangeSourceExec;
import org.elasticsearch.xpack.esql.plan.physical.FieldExtractExec;
import org.elasticsearch.xpack.esql.plan.physical.FilterExec;
import org.elasticsearch.xpack.esql.plan.physical.GrokExec;
import org.elasticsearch.xpack.esql.plan.physical.HashJoinExec;
import org.elasticsearch.xpack.esql.plan.physical.LimitExec;
import org.elasticsearch.xpack.esql.plan.physical.LocalSourceExec;
import org.elasticsearch.xpack.esql.plan.physical.MvExpandExec;
import org.elasticsearch.xpack.esql.plan.physical.OutputExec;
import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan;
import org.elasticsearch.xpack.esql.plan.physical.ProjectExec;
import org.elasticsearch.xpack.esql.plan.physical.RowExec;
import org.elasticsearch.xpack.esql.plan.physical.ShowExec;
import org.elasticsearch.xpack.esql.plan.physical.TopNExec;
import org.elasticsearch.xpack.esql.plugin.QueryPragmas;
import org.elasticsearch.xpack.esql.session.EsqlConfiguration;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.function.Function;
import java.util.stream.IntStream;
import java.util.stream.Stream;
import static java.util.Arrays.asList;
import static java.util.stream.Collectors.joining;
import static org.elasticsearch.compute.operator.LimitOperator.Factory;
import static org.elasticsearch.compute.operator.ProjectOperator.ProjectOperatorFactory;
import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.stringToInt;
/**
* The local execution planner takes a physical plan (represented as a PlanNode tree / digraph) as input and creates the
* corresponding drivers that are used to execute the given plan.
*/
public class LocalExecutionPlanner {
private static final Logger logger = LogManager.getLogger(LocalExecutionPlanner.class);
private final String sessionId;
private final String clusterAlias;
private final CancellableTask parentTask;
private final BigArrays bigArrays;
private final BlockFactory blockFactory;
private final Settings settings;
private final EsqlConfiguration configuration;
private final ExchangeSourceHandler exchangeSourceHandler;
private final ExchangeSinkHandler exchangeSinkHandler;
private final EnrichLookupService enrichLookupService;
private final PhysicalOperationProviders physicalOperationProviders;
public LocalExecutionPlanner(
String sessionId,
String clusterAlias,
CancellableTask parentTask,
BigArrays bigArrays,
BlockFactory blockFactory,
Settings settings,
EsqlConfiguration configuration,
ExchangeSourceHandler exchangeSourceHandler,
ExchangeSinkHandler exchangeSinkHandler,
EnrichLookupService enrichLookupService,
PhysicalOperationProviders physicalOperationProviders
) {
this.sessionId = sessionId;
this.clusterAlias = clusterAlias;
this.parentTask = parentTask;
this.bigArrays = bigArrays;
this.blockFactory = blockFactory;
this.settings = settings;
this.exchangeSourceHandler = exchangeSourceHandler;
this.exchangeSinkHandler = exchangeSinkHandler;
this.enrichLookupService = enrichLookupService;
this.physicalOperationProviders = physicalOperationProviders;
this.configuration = configuration;
}
/**
* Turns the given local physical plan into the list of driver factories needed to execute it.
*/
public LocalExecutionPlan plan(PhysicalPlan localPhysicalPlan) {
var context = new LocalExecutionPlannerContext(
new ArrayList<>(),
new Holder<>(DriverParallelism.SINGLE),
configuration.pragmas(),
bigArrays,
blockFactory,
settings
);
// workaround for https://github.com/elastic/elasticsearch/issues/99782
localPhysicalPlan = localPhysicalPlan.transformUp(
AggregateExec.class,
a -> a.getMode() == AggregateExec.Mode.FINAL ? new ProjectExec(a.source(), a, Expressions.asAttributes(a.aggregates())) : a
);
PhysicalOperation physicalOperation = plan(localPhysicalPlan, context);
final TimeValue statusInterval = configuration.pragmas().statusInterval();
context.addDriverFactory(
new DriverFactory(
new DriverSupplier(context.bigArrays, context.blockFactory, physicalOperation, statusInterval, settings),
context.driverParallelism().get()
)
);
return new LocalExecutionPlan(context.driverFactories);
}
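/**
* Recursively plans a single physical node by dispatching on its concrete type.
* Source nodes start a new pipeline with a fresh {@link PhysicalOperation}; every
* other node first plans its child and then wraps the result with the matching
* compute operator factory.
*/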
private PhysicalOperation plan(PhysicalPlan node, LocalExecutionPlannerContext context) {
if (node instanceof AggregateExec aggregate) {
return planAggregation(aggregate, context);
} else if (node instanceof FieldExtractExec fieldExtractExec) {
return planFieldExtractNode(fieldExtractExec, context);
} else if (node instanceof ExchangeExec exchangeExec) {
return planExchange(exchangeExec, context);
} else if (node instanceof TopNExec topNExec) {
return planTopN(topNExec, context);
} else if (node instanceof EvalExec eval) {
return planEval(eval, context);
} else if (node instanceof DissectExec dissect) {
return planDissect(dissect, context);
} else if (node instanceof GrokExec grok) {
return planGrok(grok, context);
} else if (node instanceof ProjectExec project) {
return planProject(project, context);
} else if (node instanceof FilterExec filter) {
return planFilter(filter, context);
} else if (node instanceof LimitExec limit) {
return planLimit(limit, context);
} else if (node instanceof MvExpandExec mvExpand) {
return planMvExpand(mvExpand, context);
}
// source nodes
else if (node instanceof EsQueryExec esQuery) {
return planEsQueryNode(esQuery, context);
} else if (node instanceof EsStatsQueryExec statsQuery) {
return planEsStats(statsQuery, context);
} else if (node instanceof RowExec row) {
return planRow(row, context);
} else if (node instanceof LocalSourceExec localSource) {
return planLocal(localSource, context);
} else if (node instanceof ShowExec show) {
return planShow(show);
} else if (node instanceof ExchangeSourceExec exchangeSource) {
return planExchangeSource(exchangeSource, context);
}
// lookups and joins
else if (node instanceof EnrichExec enrich) {
return planEnrich(enrich, context);
} else if (node instanceof HashJoinExec lookup) {
return planHashJoin(lookup, context);
}
// output
else if (node instanceof OutputExec outputExec) {
return planOutput(outputExec, context);
} else if (node instanceof ExchangeSinkExec exchangeSink) {
return planExchangeSink(exchangeSink, context);
}
throw new EsqlIllegalArgumentException("unknown physical plan node [" + node.nodeName() + "]");
}
private PhysicalOperation planAggregation(AggregateExec aggregate, LocalExecutionPlannerContext context) {
var source = plan(aggregate.child(), context);
return physicalOperationProviders.groupingPhysicalOperation(aggregate, source, context);
}
private PhysicalOperation planEsQueryNode(EsQueryExec esQueryExec, LocalExecutionPlannerContext context) {
return physicalOperationProviders.sourcePhysicalOperation(esQueryExec, context);
}
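/**
* Plans a stats query (e.g. a bare count) that was pushed down to Lucene. Only a
* single field statistic is currently supported, and the backend must be a Lucene
* one, i.e. {@link EsPhysicalOperationProviders}.
*/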
private PhysicalOperation planEsStats(EsStatsQueryExec statsQuery, LocalExecutionPlannerContext context) {
if (physicalOperationProviders instanceof EsPhysicalOperationProviders == false) {
throw new EsqlIllegalArgumentException("EsStatsQuery should only occur against a Lucene backend");
}
if (statsQuery.stats().size() > 1) {
throw new EsqlIllegalArgumentException("EsStatsQuery currently supports only one field statistic");
}
// for now only one stat is supported
EsStatsQueryExec.Stat stat = statsQuery.stats().get(0);
EsPhysicalOperationProviders esProvider = (EsPhysicalOperationProviders) physicalOperationProviders;
final LuceneOperator.Factory luceneFactory = esProvider.countSource(context, stat.filter(statsQuery.query()), statsQuery.limit());
Layout.Builder layout = new Layout.Builder();
layout.append(statsQuery.outputSet());
int instanceCount = Math.max(1, luceneFactory.taskConcurrency());
context.driverParallelism(new DriverParallelism(DriverParallelism.Type.DATA_PARALLELISM, instanceCount));
return PhysicalOperation.fromSource(luceneFactory, layout.build());
}
private PhysicalOperation planFieldExtractNode(FieldExtractExec fieldExtractExec, LocalExecutionPlannerContext context) {
return physicalOperationProviders.fieldExtractPhysicalOperation(fieldExtractExec, plan(fieldExtractExec.child(), context));
}
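/**
* Terminates the pipeline with an {@link OutputOperatorFactory} sink, re-ordering
* the blocks of each page so they match the declared output attributes.
*/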
private PhysicalOperation planOutput(OutputExec outputExec, LocalExecutionPlannerContext context) {
PhysicalOperation source = plan(outputExec.child(), context);
var output = outputExec.output();
return source.withSink(
new OutputOperatorFactory(
Expressions.names(output),
alignPageToAttributes(output, source.layout),
outputExec.getPageConsumer()
),
source.layout
);
}
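/**
* Builds a page transformer that re-orders blocks to match the attribute order.
* For example, with attrs [a, b] laid out on channels {a: 1, b: 0}, the mapping is
* [1, 0] and each outgoing page swaps the two blocks. When every attribute already
* sits on its own ordinal, the identity function is returned and pages pass
* through untouched.
*/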
private static Function<Page, Page> alignPageToAttributes(List<Attribute> attrs, Layout layout) {
// align the page layout with the operator output
// extraction order - the list ordinal is the output column position,
// while the value is the block's channel in the incoming page
final int[] mappedPosition = new int[attrs.size()];
int index = -1;
boolean transformRequired = false;
for (var attribute : attrs) {
mappedPosition[++index] = layout.get(attribute.id()).channel();
transformRequired |= mappedPosition[index] != index;
}
Function<Page, Page> transformer = transformRequired ? p -> {
var blocks = new Block[mappedPosition.length];
for (int i = 0; i < blocks.length; i++) {
blocks[i] = p.getBlock(mappedPosition[i]);
blocks[i].incRef();
}
p.releaseBlocks();
return new Page(blocks);
} : Function.identity();
return transformer;
}
private PhysicalOperation planExchange(ExchangeExec exchangeExec, LocalExecutionPlannerContext context) {
throw new UnsupportedOperationException("Exchange needs to be replaced with a sink/source");
}
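/**
* Plans the producer side of an exchange: pages flow into an exchange sink,
* aligned to the declared output unless this is an intermediate aggregation, whose
* internal layout must be preserved as-is.
*/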
private PhysicalOperation planExchangeSink(ExchangeSinkExec exchangeSink, LocalExecutionPlannerContext context) {
Objects.requireNonNull(exchangeSinkHandler, "ExchangeSinkHandler wasn't provided");
var child = exchangeSink.child();
PhysicalOperation source = plan(child, context);
Function<Page, Page> transformer = exchangeSink.isIntermediateAgg()
? Function.identity()
: alignPageToAttributes(exchangeSink.output(), source.layout);
return source.withSink(new ExchangeSinkOperatorFactory(exchangeSinkHandler::createExchangeSink, transformer), source.layout);
}
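/**
* Plans the consumer side of an exchange. For intermediate aggregations the layout
* is wrapped in an {@link ExchangeLayout} so the intermediate aggregation state
* resolves to the right channels.
*/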
private PhysicalOperation planExchangeSource(ExchangeSourceExec exchangeSource, LocalExecutionPlannerContext context) {
Objects.requireNonNull(exchangeSourceHandler, "ExchangeSourceHandler wasn't provided");
var builder = new Layout.Builder();
builder.append(exchangeSource.output());
// decorate the layout
var l = builder.build();
var layout = exchangeSource.isIntermediateAgg() ? new ExchangeLayout(l) : l;
return PhysicalOperation.fromSource(new ExchangeSourceOperatorFactory(exchangeSourceHandler::createExchangeSource), layout);
}
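/**
* Plans a combined sort-plus-limit. Every incoming channel needs an
* {@link ElementType} and a {@link TopNEncoder} so rows can be encoded into
* sortable keys; sort keys must be plain attributes and the limit must be a
* literal.
*/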
private PhysicalOperation planTopN(TopNExec topNExec, LocalExecutionPlannerContext context) {
PhysicalOperation source = plan(topNExec.child(), context);
ElementType[] elementTypes = new ElementType[source.layout.numberOfChannels()];
TopNEncoder[] encoders = new TopNEncoder[source.layout.numberOfChannels()];
List<Layout.ChannelAndType> inverse = source.layout.inverse();
for (int channel = 0; channel < inverse.size(); channel++) {
elementTypes[channel] = PlannerUtils.toElementType(inverse.get(channel).type());
encoders[channel] = switch (inverse.get(channel).type()) {
case IP -> TopNEncoder.IP;
case TEXT, KEYWORD -> TopNEncoder.UTF8;
case VERSION -> TopNEncoder.VERSION;
case BOOLEAN, NULL, BYTE, SHORT, INTEGER, LONG, DOUBLE, FLOAT, HALF_FLOAT, DATETIME, DATE_PERIOD, TIME_DURATION, OBJECT,
NESTED, SCALED_FLOAT, UNSIGNED_LONG, DOC_DATA_TYPE, TSID_DATA_TYPE -> TopNEncoder.DEFAULT_SORTABLE;
case GEO_POINT, CARTESIAN_POINT, GEO_SHAPE, CARTESIAN_SHAPE, COUNTER_LONG, COUNTER_INTEGER, COUNTER_DOUBLE ->
TopNEncoder.DEFAULT_UNSORTABLE;
// unsupported fields are encoded as BytesRef, we'll use the same encoder; all values should be null at this point
case PARTIAL_AGG, UNSUPPORTED -> TopNEncoder.UNSUPPORTED;
case SOURCE -> throw new EsqlIllegalArgumentException("No TopN sorting encoder for type " + inverse.get(channel).type());
};
}
List<TopNOperator.SortOrder> orders = topNExec.order().stream().map(order -> {
int sortByChannel;
if (order.child() instanceof Attribute a) {
sortByChannel = source.layout.get(a.id()).channel();
} else {
throw new EsqlIllegalArgumentException("order by expression must be an attribute");
}
return new TopNOperator.SortOrder(
sortByChannel,
order.direction().equals(Order.OrderDirection.ASC),
order.nullsPosition().equals(Order.NullsPosition.FIRST)
);
}).toList();
int limit;
if (topNExec.limit() instanceof Literal literal) {
limit = stringToInt(literal.value().toString());
} else {
throw new EsqlIllegalArgumentException("limit only supported with literal values");
}
// TODO Replace page size with passing estimatedRowSize down
/*
* The 2000 below is a hack to account for incoming size and to make
* sure the estimated row size is never 0 which'd cause a divide by 0.
* But we should replace this with passing the estimate into the real
* topn and letting it actually measure the size of rows it produces.
* That'll be more accurate. We also have no way to estimate the size of
* incoming rows, and measuring in the operator would remove the need for one.
*/
return source.with(
new TopNOperatorFactory(
limit,
asList(elementTypes),
asList(encoders),
orders,
context.pageSize(2000 + topNExec.estimatedRowSize())
),
source.layout
);
}
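/**
* Plans an EVAL by stacking one {@link EvalOperatorFactory} per field: each
* evaluator appends its result to the layout as a new channel, immediately visible
* to the evaluators of the fields that follow.
*/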
private PhysicalOperation planEval(EvalExec eval, LocalExecutionPlannerContext context) {
PhysicalOperation source = plan(eval.child(), context);
for (Alias field : eval.fields()) {
var evaluatorSupplier = EvalMapper.toEvaluator(field.child(), source.layout);
Layout.Builder layout = source.layout.builder();
layout.append(field.toAttribute());
source = source.with(new EvalOperatorFactory(evaluatorSupplier), layout.build());
}
return source;
}
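/**
* Plans DISSECT as a string-extraction operator: the input expression is evaluated
* per row and the dissect parser splits the resulting string into the extracted
* fields appended to the layout.
*/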
private PhysicalOperation planDissect(DissectExec dissect, LocalExecutionPlannerContext context) {
PhysicalOperation source = plan(dissect.child(), context);
Layout.Builder layoutBuilder = source.layout.builder();
layoutBuilder.append(dissect.extractedFields());
final Expression expr = dissect.inputExpression();
// Names in the pattern and layout can differ.
// Attributes need to be rename-able to avoid problems with shadowing - see GeneratingPlan and PushDownRegexExtract.
String[] patternNames = Expressions.names(dissect.parser().keyAttributes(Source.EMPTY)).toArray(new String[0]);
Layout layout = layoutBuilder.build();
source = source.with(
new StringExtractOperator.StringExtractOperatorFactory(
patternNames,
EvalMapper.toEvaluator(expr, layout),
() -> (input) -> dissect.parser().parser().parse(input)
),
layout
);
return source;
}
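/**
* Plans GROK. Pattern names are mapped to output channel positions and element
* types up front, so the {@link GrokEvaluatorExtracter} can write captured values
* straight into the right block builders.
*/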
private PhysicalOperation planGrok(GrokExec grok, LocalExecutionPlannerContext context) {
PhysicalOperation source = plan(grok.child(), context);
Layout.Builder layoutBuilder = source.layout.builder();
List<Attribute> extractedFields = grok.extractedFields();
layoutBuilder.append(extractedFields);
Map<String, Integer> fieldToPos = new HashMap<>(extractedFields.size());
Map<String, ElementType> fieldToType = new HashMap<>(extractedFields.size());
ElementType[] types = new ElementType[extractedFields.size()];
List<Attribute> extractedFieldsFromPattern = grok.pattern().extractedFields();
for (int i = 0; i < extractedFields.size(); i++) {
DataType extractedFieldType = extractedFields.get(i).dataType();
// Names in pattern and layout can differ.
// Attributes need to be rename-able to avoid problems with shadowing - see GeneratingPlan and PushDownRegexExtract.
String patternName = extractedFieldsFromPattern.get(i).name();
ElementType type = PlannerUtils.toElementType(extractedFieldType);
fieldToPos.put(patternName, i);
fieldToType.put(patternName, type);
types[i] = type;
}
Layout layout = layoutBuilder.build();
source = source.with(
new ColumnExtractOperator.Factory(
types,
EvalMapper.toEvaluator(grok.inputExpression(), layout),
() -> new GrokEvaluatorExtracter(grok.pattern().grok(), grok.pattern().pattern(), fieldToPos, fieldToType)
),
layout
);
return source;
}
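/**
* Plans ENRICH as an asynchronous lookup against the cluster-local enrich index,
* appending the enrich fields to the layout. Fails if no concrete enrich index
* exists for this cluster alias.
*/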
private PhysicalOperation planEnrich(EnrichExec enrich, LocalExecutionPlannerContext context) {
PhysicalOperation source = plan(enrich.child(), context);
Layout.Builder layoutBuilder = source.layout.builder();
layoutBuilder.append(enrich.enrichFields());
Layout layout = layoutBuilder.build();
String enrichIndex = enrich.concreteIndices().get(clusterAlias);
if (enrichIndex == null) {
throw new EsqlIllegalArgumentException("No concrete enrich index for cluster [" + clusterAlias + "]");
}
Layout.ChannelAndType input = source.layout.get(enrich.matchField().id());
return source.with(
new EnrichLookupOperator.Factory(
sessionId,
parentTask,
context.queryPragmas().enrichMaxWorkers(),
input.channel(),
enrichLookupService,
input.type(),
enrichIndex,
enrich.matchType(),
enrich.policyMatchField(),
enrich.enrichFields()
),
layout
);
}
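/**
* Plans a hash join against locally materialized blocks in three steps: a
* row-in-table lookup emits a channel of matching "positions", one
* {@link ColumnLoadOperator} per added field loads values through those positions,
* and a final projection drops the positions channel again.
*/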
private PhysicalOperation planHashJoin(HashJoinExec join, LocalExecutionPlannerContext context) {
PhysicalOperation source = plan(join.child(), context);
int positionsChannel = source.layout.numberOfChannels();
Layout.Builder layoutBuilder = source.layout.builder();
for (Attribute f : join.output()) {
if (join.child().outputSet().contains(f)) {
continue;
}
layoutBuilder.append(f);
}
Layout layout = layoutBuilder.build();
Block[] localData = join.joinData().supplier().get();
RowInTableLookupOperator.Key[] keys = new RowInTableLookupOperator.Key[join.leftFields().size()];
int[] blockMapping = new int[join.leftFields().size()];
for (int k = 0; k < join.leftFields().size(); k++) {
Attribute left = join.leftFields().get(k);
Attribute right = join.rightFields().get(k);
Block localField = null;
for (int l = 0; l < join.joinData().output().size(); l++) {
if (join.joinData().output().get(l).name().equals(right.name())) {
localField = localData[l];
}
}
if (localField == null) {
throw new IllegalArgumentException("can't find local data for [" + right + "]");
}
keys[k] = new RowInTableLookupOperator.Key(left.name(), localField);
Layout.ChannelAndType input = source.layout.get(left.id());
blockMapping[k] = input.channel();
}
// Load the "positions" of each match
source = source.with(new RowInTableLookupOperator.Factory(keys, blockMapping), layout);
// Load the "values" from each match
for (Attribute f : join.addedFields()) {
Block localField = null;
for (int l = 0; l < join.joinData().output().size(); l++) {
if (join.joinData().output().get(l).name().equals(f.name())) {
localField = localData[l];
}
}
if (localField == null) {
throw new IllegalArgumentException("can't find local data for [" + f + "]");
}
source = source.with(
new ColumnLoadOperator.Factory(new ColumnLoadOperator.Values(f.name(), localField), positionsChannel),
layout
);
}
// Drop the "positions" of the match
List<Integer> projection = new ArrayList<>();
IntStream.range(0, positionsChannel).boxed().forEach(projection::add);
IntStream.range(positionsChannel + 1, positionsChannel + 1 + join.addedFields().size()).boxed().forEach(projection::add);
return source.with(new ProjectOperatorFactory(projection), layout);
}
private ExpressionEvaluator.Factory toEvaluator(Expression exp, Layout layout) {
return EvalMapper.toEvaluator(exp, layout);
}
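/**
* Plans ROW by folding each literal field to its value and emitting the resulting
* single row through a {@link RowOperatorFactory} source.
*/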
private PhysicalOperation planRow(RowExec row, LocalExecutionPlannerContext context) {
List<Object> obj = row.fields().stream().map(f -> f.child().fold()).toList();
Layout.Builder layout = new Layout.Builder();
layout.append(row.output());
return PhysicalOperation.fromSource(new RowOperatorFactory(obj), layout.build());
}