/*
* Copyright 2024 Hazelcast Inc.
*
* Licensed under the Hazelcast Community License (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://hazelcast.com/hazelcast-community-license
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package com.hazelcast.jet.sql.impl;

import com.hazelcast.cluster.Address;
import com.hazelcast.cluster.Member;
import com.hazelcast.config.BitmapIndexOptions;
import com.hazelcast.config.IndexConfig;
import com.hazelcast.config.IndexType;
import com.hazelcast.core.HazelcastException;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.dataconnection.DataConnection;
import com.hazelcast.dataconnection.impl.DataConnectionServiceImpl;
import com.hazelcast.dataconnection.impl.InternalDataConnectionService;
import com.hazelcast.internal.serialization.InternalSerializationService;
import com.hazelcast.internal.util.PartitioningStrategyUtil;
import com.hazelcast.jet.Job;
import com.hazelcast.jet.JobStateSnapshot;
import com.hazelcast.jet.RestartableException;
import com.hazelcast.jet.config.JobConfig;
import com.hazelcast.jet.config.JobConfigArguments;
import com.hazelcast.jet.datamodel.Tuple2;
import com.hazelcast.jet.impl.AbstractJetInstance;
import com.hazelcast.jet.impl.JetServiceBackend;
import com.hazelcast.jet.impl.util.Util;
import com.hazelcast.jet.sql.impl.SqlPlanImpl.AlterJobPlan;
import com.hazelcast.jet.sql.impl.SqlPlanImpl.CreateIndexPlan;
import com.hazelcast.jet.sql.impl.SqlPlanImpl.CreateJobPlan;
import com.hazelcast.jet.sql.impl.SqlPlanImpl.CreateMappingPlan;
import com.hazelcast.jet.sql.impl.SqlPlanImpl.CreateSnapshotPlan;
import com.hazelcast.jet.sql.impl.SqlPlanImpl.CreateTypePlan;
import com.hazelcast.jet.sql.impl.SqlPlanImpl.CreateViewPlan;
import com.hazelcast.jet.sql.impl.SqlPlanImpl.DmlPlan;
import com.hazelcast.jet.sql.impl.SqlPlanImpl.DropDataConnectionPlan;
import com.hazelcast.jet.sql.impl.SqlPlanImpl.DropJobPlan;
import com.hazelcast.jet.sql.impl.SqlPlanImpl.DropMappingPlan;
import com.hazelcast.jet.sql.impl.SqlPlanImpl.DropSnapshotPlan;
import com.hazelcast.jet.sql.impl.SqlPlanImpl.DropTypePlan;
import com.hazelcast.jet.sql.impl.SqlPlanImpl.DropViewPlan;
import com.hazelcast.jet.sql.impl.SqlPlanImpl.ExplainStatementPlan;
import com.hazelcast.jet.sql.impl.SqlPlanImpl.IMapDeletePlan;
import com.hazelcast.jet.sql.impl.SqlPlanImpl.IMapInsertPlan;
import com.hazelcast.jet.sql.impl.SqlPlanImpl.IMapSelectPlan;
import com.hazelcast.jet.sql.impl.SqlPlanImpl.IMapSinkPlan;
import com.hazelcast.jet.sql.impl.SqlPlanImpl.IMapUpdatePlan;
import com.hazelcast.jet.sql.impl.SqlPlanImpl.SelectPlan;
import com.hazelcast.jet.sql.impl.SqlPlanImpl.ShowStatementPlan;
import com.hazelcast.jet.sql.impl.parse.SqlShowStatement.ShowStatementTarget;
import com.hazelcast.jet.sql.impl.schema.DataConnectionResolver;
import com.hazelcast.jet.sql.impl.schema.TableResolverImpl;
import com.hazelcast.jet.sql.impl.validate.UpdateDataConnectionOperation;
import com.hazelcast.logging.ILogger;
import com.hazelcast.map.IMap;
import com.hazelcast.map.impl.EntryRemovingProcessor;
import com.hazelcast.map.impl.MapContainer;
import com.hazelcast.map.impl.MapService;
import com.hazelcast.map.impl.MapServiceContext;
import com.hazelcast.map.impl.proxy.MapProxyImpl;
import com.hazelcast.partition.PartitioningStrategy;
import com.hazelcast.partition.strategy.AttributePartitioningStrategy;
import com.hazelcast.partition.strategy.DefaultPartitioningStrategy;
import com.hazelcast.query.impl.getters.Extractors;
import com.hazelcast.spi.impl.NodeEngine;
import com.hazelcast.spi.impl.operationservice.impl.InvocationFuture;
import com.hazelcast.spi.properties.ClusterProperty;
import com.hazelcast.sql.SqlColumnMetadata;
import com.hazelcast.sql.SqlResult;
import com.hazelcast.sql.SqlRowMetadata;
import com.hazelcast.sql.impl.ParameterConverter;
import com.hazelcast.sql.impl.QueryException;
import com.hazelcast.sql.impl.QueryId;
import com.hazelcast.sql.impl.QueryParameterMetadata;
import com.hazelcast.sql.impl.SqlErrorCode;
import com.hazelcast.sql.impl.UpdateSqlResultImpl;
import com.hazelcast.sql.impl.expression.Expression;
import com.hazelcast.sql.impl.expression.ExpressionEvalContext;
import com.hazelcast.sql.impl.expression.UntrustedExpressionEvalContext;
import com.hazelcast.sql.impl.row.EmptyRow;
import com.hazelcast.sql.impl.row.JetSqlRow;
import com.hazelcast.sql.impl.schema.dataconnection.DataConnectionCatalogEntry;
import com.hazelcast.sql.impl.schema.type.Type;
import com.hazelcast.sql.impl.schema.view.View;
import com.hazelcast.sql.impl.security.SqlSecurityContext;
import com.hazelcast.sql.impl.state.QueryResultRegistry;
import com.hazelcast.sql.impl.type.QueryDataType;
import com.hazelcast.shaded.org.apache.calcite.rel.RelNode;
import com.hazelcast.shaded.org.apache.calcite.rel.type.RelDataTypeField;
import com.hazelcast.shaded.org.apache.calcite.sql.SqlNode;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.concurrent.CancellationException;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicLong;
import java.util.stream.Collectors;
import java.util.stream.Stream;

import static com.hazelcast.cluster.memberselector.MemberSelectors.DATA_MEMBER_SELECTOR;
import static com.hazelcast.config.BitmapIndexOptions.UniqueKeyTransformation;
import static com.hazelcast.jet.config.JobConfigArguments.KEY_SQL_QUERY_TEXT;
import static com.hazelcast.jet.config.JobConfigArguments.KEY_SQL_UNBOUNDED;
import static com.hazelcast.jet.datamodel.Tuple2.tuple2;
import static com.hazelcast.jet.impl.JetServiceBackend.SQL_ARGUMENTS_KEY_NAME;
import static com.hazelcast.jet.impl.util.ExceptionUtil.isTopologyException;
import static com.hazelcast.jet.impl.util.Util.getNodeEngine;
import static com.hazelcast.jet.sql.impl.SqlPlanImpl.CreateDataConnectionPlan;
import static com.hazelcast.jet.sql.impl.parse.SqlCreateIndex.UNIQUE_KEY;
import static com.hazelcast.jet.sql.impl.parse.SqlCreateIndex.UNIQUE_KEY_TRANSFORMATION;
import static com.hazelcast.jet.sql.impl.validate.types.HazelcastTypeUtils.toHazelcastType;
import static com.hazelcast.query.QueryConstants.KEY_ATTRIBUTE_NAME;
import static com.hazelcast.sql.SqlColumnType.JSON;
import static com.hazelcast.sql.SqlColumnType.VARCHAR;
import static com.hazelcast.sql.impl.QueryUtils.quoteCompoundIdentifier;
import static java.util.Arrays.asList;
import static java.util.Collections.emptyIterator;
import static java.util.Collections.emptySet;
import static java.util.Collections.singletonList;
import static java.util.Comparator.comparing;
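
/**
 * Executes the {@link SqlPlanImpl} plans produced by the SQL planner: DDL statements such as
 * CREATE/DROP MAPPING, CREATE/DROP DATA CONNECTION and CREATE INDEX, as well as job, DML and
 * query plans.
 */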
public class PlanExecutor {
    private static final String LE = System.lineSeparator();
    private static final String DEFAULT_UNIQUE_KEY_TRANSFORMATION = "OBJECT";

    private final TableResolverImpl catalog;
    private final DataConnectionResolver dataConnectionCatalog;
    private final HazelcastInstance hazelcastInstance;
    private final NodeEngine nodeEngine;
    private final QueryResultRegistry resultRegistry;
    private final List sqlJobInvocationObservers = new ArrayList<>();
    private final ILogger logger;

    // test-only
    private final AtomicLong directIMapQueriesExecuted = new AtomicLong();

    public PlanExecutor(
            NodeEngine nodeEngine,
            TableResolverImpl catalog,
            DataConnectionResolver dataConnectionResolver,
            QueryResultRegistry resultRegistry
    ) {
        this.nodeEngine = nodeEngine;
        this.hazelcastInstance = nodeEngine.getHazelcastInstance();
        this.catalog = catalog;
        this.dataConnectionCatalog = dataConnectionResolver;
        this.resultRegistry = resultRegistry;
        logger = nodeEngine.getLogger(getClass());
    }
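
    /**
     * Handles {@code CREATE MAPPING}: stores the mapping in the SQL catalog, honouring the
     * {@code OR REPLACE} and {@code IF NOT EXISTS} clauses.
     */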
    SqlResult execute(CreateMappingPlan plan, SqlSecurityContext ssc) {
        catalog.createMapping(plan.mapping(), plan.replace(), plan.ifNotExists(), ssc);
        return UpdateSqlResultImpl.createUpdateCountResult(0);
    }
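
    /**
     * Handles {@code DROP MAPPING [IF EXISTS]}: removes the mapping from the SQL catalog.
     */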
    SqlResult execute(DropMappingPlan plan) {
        catalog.removeMapping(plan.name(), plan.ifExists());
        return UpdateSqlResultImpl.createUpdateCountResult(0);
    }
    SqlResult execute(CreateDataConnectionPlan plan) {
        InternalDataConnectionService dlService = nodeEngine.getDataConnectionService();
        assert !plan.ifNotExists() || !plan.isReplace();
        if (dlService.existsConfigDataConnection(plan.name())) {
            throw new HazelcastException("Cannot replace a data connection created from configuration");
        }
        // checks if type is correct
        String normalizedTypeName = dlService.normalizedTypeName(plan.type());
        dlService.classForDataConnectionType(normalizedTypeName);
        boolean added = dataConnectionCatalog.createDataConnection(
                new DataConnectionCatalogEntry(
                        plan.name(),
                        normalizedTypeName,
                        plan.shared(),
                        plan.options()),
                plan.isReplace(),
                plan.ifNotExists());
        if (added) {
            broadcastUpdateDataConnectionOperations(plan.name());
            // TODO invoke the listeners so plans can be invalidated after the
            // change was propagated to InternalDataConnectionService
            dataConnectionCatalog.invokeChangeListeners();
        }
        return UpdateSqlResultImpl.createUpdateCountResult(0);
    }
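
    /**
     * Handles {@code DROP DATA CONNECTION [IF EXISTS]}. Data connections defined in the member
     * configuration cannot be dropped.
     */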
    SqlResult execute(DropDataConnectionPlan plan) {
        InternalDataConnectionService dlService = nodeEngine.getDataConnectionService();
        if (dlService.existsConfigDataConnection(plan.name())) {
            throw new HazelcastException("Data connection '" + plan.name() + "' is configured via Config and can't be removed");
        }
        dataConnectionCatalog.removeDataConnection(plan.name(), plan.ifExists());
        broadcastUpdateDataConnectionOperations(plan.name());
        // TODO invoke the listeners so plans can be invalidated after the
        // change was propagated to InternalDataConnectionService
        dataConnectionCatalog.invokeChangeListeners();
        return UpdateSqlResultImpl.createUpdateCountResult(0);
    }
    SqlResult execute(CreateIndexPlan plan) {
        MapContainer mapContainer = getMapContainer(hazelcastInstance.getMap(plan.mapName()));
        if (!mapContainer.shouldUseGlobalIndex()) {
            // for partitioned indexes checking existence is more complicated
            // and SQL cannot yet use partitioned indexes
            throw QueryException.error(SqlErrorCode.INDEX_INVALID, "Cannot create index \"" + plan.indexName()
                    + "\" on the IMap \"" + plan.mapName() + "\" because it would not be global "
                    + "(make sure the property \"" + ClusterProperty.GLOBAL_HD_INDEX_ENABLED
                    + "\" is set to \"true\")");
        }
        if (!plan.ifNotExists()) {
            // If `IF NOT EXISTS` isn't specified, we do a simple check for the existence of the index. This is not
            // OK if two clients concurrently try to create the index (they could both succeed), but covers the
            // common case. There's no atomic operation to create an index in IMDG, so it's not easy to implement.
            if (mapContainer.getGlobalIndexRegistry().getIndex(plan.indexName()) != null) {
                throw QueryException.error("Can't create index: index '" + plan.indexName() + "' already exists");
            }
        }
        IndexConfig indexConfig = new IndexConfig(plan.indexType(), plan.attributes())
                .setName(plan.indexName());
        if (plan.indexType().equals(IndexType.BITMAP)) {
            Map<String, String> options = plan.options();
            String uniqueKey = options.get(UNIQUE_KEY);
            if (uniqueKey == null) {
                uniqueKey = KEY_ATTRIBUTE_NAME.value();
            }
            String uniqueKeyTransform = options.get(UNIQUE_KEY_TRANSFORMATION);
            if (uniqueKeyTransform == null) {
                uniqueKeyTransform = DEFAULT_UNIQUE_KEY_TRANSFORMATION;
            }
            BitmapIndexOptions bitmapIndexOptions = new BitmapIndexOptions();
            bitmapIndexOptions.setUniqueKey(uniqueKey);
            bitmapIndexOptions.setUniqueKeyTransformation(UniqueKeyTransformation.fromName(uniqueKeyTransform));
            indexConfig.setBitmapIndexOptions(bitmapIndexOptions);
        }
        // The `addIndex()` call does nothing if an index with the same name already exists,
        // even if its config is different.
        hazelcastInstance.getMap(plan.mapName()).addIndex(indexConfig);
        return UpdateSqlResultImpl.createUpdateCountResult(0);
    }
SqlResult execute(CreateJobPlan plan, List