/*
* Copyright 2021 Hazelcast Inc.
*
* Licensed under the Hazelcast Community License (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://hazelcast.com/hazelcast-community-license
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.hazelcast.jet.sql.impl.schema;
import com.hazelcast.core.EntryEvent;
import com.hazelcast.core.LifecycleEvent;
import com.hazelcast.jet.sql.impl.connector.SqlConnector;
import com.hazelcast.jet.sql.impl.connector.SqlConnectorCache;
import com.hazelcast.jet.sql.impl.connector.infoschema.MappingColumnsTable;
import com.hazelcast.jet.sql.impl.connector.infoschema.MappingsTable;
import com.hazelcast.jet.sql.impl.connector.infoschema.TablesTable;
import com.hazelcast.jet.sql.impl.connector.infoschema.ViewsTable;
import com.hazelcast.jet.sql.impl.connector.virtual.ViewTable;
import com.hazelcast.spi.impl.NodeEngine;
import com.hazelcast.sql.impl.QueryException;
import com.hazelcast.sql.impl.schema.BadTable;
import com.hazelcast.sql.impl.schema.ConstantTableStatistics;
import com.hazelcast.sql.impl.schema.Mapping;
import com.hazelcast.sql.impl.schema.MappingField;
import com.hazelcast.sql.impl.schema.Table;
import com.hazelcast.sql.impl.schema.TableResolver;
import com.hazelcast.sql.impl.schema.type.Type;
import com.hazelcast.sql.impl.schema.view.View;
import javax.annotation.Nonnull;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.function.BiFunction;
import static com.hazelcast.sql.impl.QueryUtils.CATALOG;
import static java.util.Arrays.asList;
import static java.util.Collections.singletonList;
/**
* A table resolver for DDL-created mappings and for the {@code
* information_schema}.
*/
public class TableResolverImpl implements TableResolver {
public static final String SCHEMA_NAME_PUBLIC = "public";
public static final String SCHEMA_NAME_INFORMATION_SCHEMA = "information_schema";
private static final List<List<String>> SEARCH_PATHS = singletonList(
asList(CATALOG, SCHEMA_NAME_PUBLIC)
);
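// Producers for the virtual tables exposed under information_schema; each one builds its
// table from the current lists of mappings (m) and views (v).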
private static final List<BiFunction<List<Mapping>, List<View>, Table>> ADDITIONAL_TABLE_PRODUCERS = Arrays.asList(
(m, v) -> new TablesTable(CATALOG, SCHEMA_NAME_INFORMATION_SCHEMA, SCHEMA_NAME_PUBLIC, m, v),
(m, v) -> new MappingsTable(CATALOG, SCHEMA_NAME_INFORMATION_SCHEMA, SCHEMA_NAME_PUBLIC, m),
(m, v) -> new MappingColumnsTable(CATALOG, SCHEMA_NAME_INFORMATION_SCHEMA, SCHEMA_NAME_PUBLIC, m, v),
(m, v) -> new ViewsTable(CATALOG, SCHEMA_NAME_INFORMATION_SCHEMA, SCHEMA_NAME_PUBLIC, v)
);
private final NodeEngine nodeEngine;
private final TablesStorage tableStorage;
private final SqlConnectorCache connectorCache;
private final List<TableListener> listeners;
// These fields would normally be volatile because we access them from multiple threads. But we
// don't care if some thread doesn't see a newer value written by another thread: each thread will
// write roughly the same value (we assume that the number of mappings and views doesn't change much),
// so we save a tiny bit of performance by not synchronizing :)
private int lastViewsSize;
private int lastMappingsSize;
public TableResolverImpl(
NodeEngine nodeEngine,
TablesStorage tableStorage,
SqlConnectorCache connectorCache
) {
this.nodeEngine = nodeEngine;
this.tableStorage = tableStorage;
this.connectorCache = connectorCache;
this.listeners = new CopyOnWriteArrayList<>();
// Because entry listeners are invoked asynchronously from the calling thread, local changes are
// handled directly in createMapping() & removeMapping(); we therefore skip events originating
// from the local member to avoid processing them twice.
nodeEngine.getHazelcastInstance().getLifecycleService().addLifecycleListener(event -> {
if (event.getState() == LifecycleEvent.LifecycleState.STARTED) {
this.tableStorage.initializeWithListener(new TablesStorage.EntryListenerAdapter() {
@Override
public void entryUpdated(EntryEvent<String, Object> event) {
if (!event.getMember().localMember()) {
listeners.forEach(TableListener::onTableChanged);
}
}
@Override
public void entryRemoved(EntryEvent<String, Object> event) {
if (!event.getMember().localMember()) {
listeners.forEach(TableListener::onTableChanged);
}
}
});
}
});
}
// region mapping
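/**
 * Creates a mapping in the catalog after resolving and validating its fields via the
 * connector for the mapping's type.
 *
 * @param replace     overwrite an existing mapping/view with the same name and notify listeners
 * @param ifNotExists silently do nothing if the name is already taken
 */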
public void createMapping(Mapping mapping, boolean replace, boolean ifNotExists) {
Mapping resolved = resolveMapping(mapping);
String name = resolved.name();
if (ifNotExists) {
tableStorage.putIfAbsent(name, resolved);
} else if (replace) {
tableStorage.put(name, resolved);
listeners.forEach(TableListener::onTableChanged);
} else if (!tableStorage.putIfAbsent(name, resolved)) {
throw QueryException.error("Mapping or view already exists: " + name);
}
}
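/**
 * Resolves and validates the mapping's fields using the {@link SqlConnector} registered for
 * the mapping's type, and returns a copy of the mapping with the resolved fields and options.
 */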
private Mapping resolveMapping(Mapping mapping) {
String type = mapping.type();
Map<String, String> options = mapping.options();
SqlConnector connector = connectorCache.forType(type);
List<MappingField> resolvedFields = connector.resolveAndValidateFields(
nodeEngine,
options,
mapping.fields(),
mapping.externalName()
);
return new Mapping(
mapping.name(),
mapping.externalName(),
type,
new ArrayList<>(resolvedFields),
new LinkedHashMap<>(options)
);
}
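/**
 * Removes a mapping and notifies listeners; if it doesn't exist, throws unless
 * {@code ifExists} is set.
 */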
public void removeMapping(String name, boolean ifExists) {
if (tableStorage.removeMapping(name) != null) {
listeners.forEach(TableListener::onTableChanged);
} else if (!ifExists) {
throw QueryException.error("Mapping does not exist: " + name);
}
}
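/** Returns the names of all mappings stored in the catalog. */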
@Nonnull
public Collection<String> getMappingNames() {
return tableStorage.mappingNames();
}
// endregion
// region view
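/**
 * Creates a view in the catalog. With {@code replace}, an existing entry with the same name is
 * overwritten; with {@code ifNotExists}, the call is a no-op if the name is already taken;
 * otherwise a duplicate name results in an error.
 */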
public void createView(View view, boolean replace, boolean ifNotExists) {
if (ifNotExists) {
tableStorage.putIfAbsent(view.name(), view);
} else if (replace) {
tableStorage.put(view.name(), view);
} else if (!tableStorage.putIfAbsent(view.name(), view)) {
throw QueryException.error("Mapping or view already exists: " + view.name());
}
}
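/** Returns the view stored under the given name, if any. */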
public View getView(String name) {
return tableStorage.getView(name);
}
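/** Removes a view; if it doesn't exist, throws unless {@code ifExists} is set. */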
public void removeView(String name, boolean ifExists) {
if (tableStorage.removeView(name) == null && !ifExists) {
throw QueryException.error("View does not exist: " + name);
}
}
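/** Returns the names of all views stored in the catalog. */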
@Nonnull
public Collection<String> getViewNames() {
return tableStorage.viewNames();
}
// endregion
// region type
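/** Returns the names of all types stored in the catalog. */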
public Collection<String> getTypeNames() {
return tableStorage.typeNames();
}
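/** Returns all types stored in the catalog. */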
public Collection<Type> getTypes() {
return tableStorage.getAllTypes();
}
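/**
 * Creates a type in the catalog, following the same {@code replace}/{@code ifNotExists}
 * semantics as mappings and views.
 */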
public void createType(Type type, boolean replace, boolean ifNotExists) {
if (ifNotExists) {
tableStorage.putIfAbsent(type.getName(), type);
} else if (replace) {
tableStorage.put(type.getName(), type);
} else if (!tableStorage.putIfAbsent(type.getName(), type)) {
throw QueryException.error("Type already exists: " + type.getName());
}
}
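/** Removes a type; if it doesn't exist, throws unless {@code ifExists} is set. */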
public void removeType(String name, boolean ifExists) {
if (tableStorage.removeType(name) == null && !ifExists) {
throw QueryException.error("Type does not exist: " + name);
}
}
// endregion
@Nonnull
@Override
public List<List<String>> getDefaultSearchPaths() {
return SEARCH_PATHS;
}
@Nonnull
@Override
public List<Table>