io.trino.plugin.hive.metastore.thrift.BridgingHiveMetastore Maven / Gradle / Ivy
This is a Databricks build of Trino's Hive plugin, which adds support for HTTP-based transport
for its Hive metastore Thrift interface.
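For orientation, here is a minimal usage sketch. How the ThriftMetastore instance is obtained is deployment-specific, so the helper below is a hypothetical placeholder, as are the schema and table names:

// Minimal usage sketch: createThriftMetastore() is a hypothetical placeholder;
// in a real deployment the ThriftMetastore comes from the plugin's own wiring.
ThriftMetastore thriftMetastore = createThriftMetastore();
HiveMetastore metastore = new BridgingHiveMetastore(thriftMetastore);
Optional<Table> table = metastore.getTable("default", "orders"); // placeholder names
table.ifPresent(t -> System.out.println(t.getStorage().getLocation()));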
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.trino.plugin.hive.metastore.thrift;
import com.google.common.collect.ImmutableMap;
import io.trino.hive.thrift.metastore.DataOperationType;
import io.trino.hive.thrift.metastore.FieldSchema;
import io.trino.plugin.hive.HiveColumnStatisticType;
import io.trino.plugin.hive.HivePartition;
import io.trino.plugin.hive.HiveType;
import io.trino.plugin.hive.PartitionStatistics;
import io.trino.plugin.hive.acid.AcidOperation;
import io.trino.plugin.hive.acid.AcidTransaction;
import io.trino.plugin.hive.metastore.AcidTransactionOwner;
import io.trino.plugin.hive.metastore.Database;
import io.trino.plugin.hive.metastore.HiveMetastore;
import io.trino.plugin.hive.metastore.HivePrincipal;
import io.trino.plugin.hive.metastore.HivePrivilegeInfo;
import io.trino.plugin.hive.metastore.HivePrivilegeInfo.HivePrivilege;
import io.trino.plugin.hive.metastore.Partition;
import io.trino.plugin.hive.metastore.PartitionWithStatistics;
import io.trino.plugin.hive.metastore.PrincipalPrivileges;
import io.trino.plugin.hive.metastore.Table;
import io.trino.plugin.hive.util.HiveUtil;
import io.trino.spi.TrinoException;
import io.trino.spi.connector.SchemaNotFoundException;
import io.trino.spi.connector.SchemaTableName;
import io.trino.spi.connector.TableNotFoundException;
import io.trino.spi.predicate.TupleDomain;
import io.trino.spi.security.RoleGrant;
import io.trino.spi.type.Type;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.OptionalLong;
import java.util.Set;
import java.util.function.Function;
import java.util.stream.Collectors;
import static com.google.common.collect.ImmutableList.toImmutableList;
import static io.trino.plugin.hive.HiveMetadata.TABLE_COMMENT;
import static io.trino.plugin.hive.metastore.MetastoreUtil.isAvroTableWithSchemaSet;
import static io.trino.plugin.hive.metastore.MetastoreUtil.verifyCanDropColumn;
import static io.trino.plugin.hive.metastore.thrift.ThriftMetastoreUtil.csvSchemaFields;
import static io.trino.plugin.hive.metastore.thrift.ThriftMetastoreUtil.fromMetastoreApiDatabase;
import static io.trino.plugin.hive.metastore.thrift.ThriftMetastoreUtil.fromMetastoreApiTable;
import static io.trino.plugin.hive.metastore.thrift.ThriftMetastoreUtil.isAvroTableWithSchemaSet;
import static io.trino.plugin.hive.metastore.thrift.ThriftMetastoreUtil.isCsvTable;
import static io.trino.plugin.hive.metastore.thrift.ThriftMetastoreUtil.toMetastoreApiDatabase;
import static io.trino.plugin.hive.metastore.thrift.ThriftMetastoreUtil.toMetastoreApiTable;
import static io.trino.spi.StandardErrorCode.NOT_SUPPORTED;
import static io.trino.spi.security.PrincipalType.USER;
import static java.util.Objects.requireNonNull;
import static java.util.function.UnaryOperator.identity;
public class BridgingHiveMetastore
implements HiveMetastore
{
private final ThriftMetastore delegate;
public BridgingHiveMetastore(ThriftMetastore delegate)
{
this.delegate = requireNonNull(delegate, "delegate is null");
}
@Override
public Optional<Database> getDatabase(String databaseName)
{
return delegate.getDatabase(databaseName).map(ThriftMetastoreUtil::fromMetastoreApiDatabase);
}
@Override
public List<String> getAllDatabases()
{
return delegate.getAllDatabases();
}
@Override
public Optional<Table> getTable(String databaseName, String tableName)
{
return delegate.getTable(databaseName, tableName).map(table -> {
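// For Avro tables with an explicit schema, the column list stored in the
// metastore may not match the actual schema, so the fields are re-read through
// the metastore; CSV tables are rebuilt with all-varchar columns.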
if (isAvroTableWithSchemaSet(table)) {
return fromMetastoreApiTable(table, delegate.getFields(databaseName, tableName).orElseThrow());
}
if (isCsvTable(table)) {
return fromMetastoreApiTable(table, csvSchemaFields(table.getSd().getCols()));
}
return fromMetastoreApiTable(table);
});
}
@Override
public Set<HiveColumnStatisticType> getSupportedColumnStatistics(Type type)
{
return delegate.getSupportedColumnStatistics(type);
}
@Override
public PartitionStatistics getTableStatistics(Table table)
{
return delegate.getTableStatistics(toMetastoreApiTable(table));
}
@Override
public Map<String, PartitionStatistics> getPartitionStatistics(Table table, List<Partition> partitions)
{
return delegate.getPartitionStatistics(
toMetastoreApiTable(table),
partitions.stream()
.map(ThriftMetastoreUtil::toMetastoreApiPartition)
.collect(toImmutableList()));
}
@Override
public void updateTableStatistics(String databaseName, String tableName, AcidTransaction transaction, Function<PartitionStatistics, PartitionStatistics> update)
{
delegate.updateTableStatistics(databaseName, tableName, transaction, update);
}
@Override
public void updatePartitionStatistics(Table table, Map<String, Function<PartitionStatistics, PartitionStatistics>> updates)
{
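// Convert the Trino table to the Thrift representation once and reuse it for
// every per-partition statistics update.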
io.trino.hive.thrift.metastore.Table metastoreTable = toMetastoreApiTable(table);
updates.forEach((partitionName, update) -> delegate.updatePartitionStatistics(metastoreTable, partitionName, update));
}
@Override
public List<String> getAllTables(String databaseName)
{
return delegate.getAllTables(databaseName);
}
@Override
public List<String> getTablesWithParameter(String databaseName, String parameterKey, String parameterValue)
{
return delegate.getTablesWithParameter(databaseName, parameterKey, parameterValue);
}
@Override
public List<String> getAllViews(String databaseName)
{
return delegate.getAllViews(databaseName);
}
@Override
public void createDatabase(Database database)
{
delegate.createDatabase(toMetastoreApiDatabase(database));
}
@Override
public void dropDatabase(String databaseName, boolean deleteData)
{
delegate.dropDatabase(databaseName, deleteData);
}
@Override
public void renameDatabase(String databaseName, String newDatabaseName)
{
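// Some Hive metastore implementations ignore database renames rather than
// failing; detect that by checking whether the old name still resolves to a
// database bearing the old name after the alter call.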
io.trino.hive.thrift.metastore.Database database = delegate.getDatabase(databaseName)
.orElseThrow(() -> new SchemaNotFoundException(databaseName));
database.setName(newDatabaseName);
delegate.alterDatabase(databaseName, database);
delegate.getDatabase(databaseName).ifPresent(newDatabase -> {
if (newDatabase.getName().equals(databaseName)) {
throw new TrinoException(NOT_SUPPORTED, "Hive metastore does not support renaming schemas");
}
});
}
@Override
public void setDatabaseOwner(String databaseName, HivePrincipal principal)
{
Database database = fromMetastoreApiDatabase(delegate.getDatabase(databaseName)
.orElseThrow(() -> new SchemaNotFoundException(databaseName)));
Database newDatabase = Database.builder(database)
.setOwnerName(Optional.of(principal.getName()))
.setOwnerType(Optional.of(principal.getType()))
.build();
delegate.alterDatabase(databaseName, toMetastoreApiDatabase(newDatabase));
}
@Override
public void createTable(Table table, PrincipalPrivileges principalPrivileges)
{
delegate.createTable(toMetastoreApiTable(table, principalPrivileges));
}
@Override
public void dropTable(String databaseName, String tableName, boolean deleteData)
{
delegate.dropTable(databaseName, tableName, deleteData);
}
@Override
public void replaceTable(String databaseName, String tableName, Table newTable, PrincipalPrivileges principalPrivileges)
{
alterTable(databaseName, tableName, toMetastoreApiTable(newTable, principalPrivileges));
}
@Override
public void renameTable(String databaseName, String tableName, String newDatabaseName, String newTableName)
{
io.trino.hive.thrift.metastore.Table table = delegate.getTable(databaseName, tableName)
.orElseThrow(() -> new TableNotFoundException(new SchemaTableName(databaseName, tableName)));
table.setDbName(newDatabaseName);
table.setTableName(newTableName);
alterTable(databaseName, tableName, table);
}
@Override
public void commentTable(String databaseName, String tableName, Optional<String> comment)
{
io.trino.hive.thrift.metastore.Table table = delegate.getTable(databaseName, tableName)
.orElseThrow(() -> new TableNotFoundException(new SchemaTableName(databaseName, tableName)));
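// Rebuild the parameter map without the old table comment, then add the new
// comment if one was provided.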
Map<String, String> parameters = table.getParameters().entrySet().stream()
.filter(entry -> !entry.getKey().equals(TABLE_COMMENT))
.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
comment.ifPresent(value -> parameters.put(TABLE_COMMENT, value));
table.setParameters(parameters);
alterTable(databaseName, tableName, table);
}
@Override
public void setTableOwner(String databaseName, String tableName, HivePrincipal principal)
{
// TODO Add role support https://github.com/trinodb/trino/issues/5706
if (principal.getType() != USER) {
throw new TrinoException(NOT_SUPPORTED, "Setting table owner type as a role is not supported");
}
Table table = fromMetastoreApiTable(delegate.getTable(databaseName, tableName)
.orElseThrow(() -> new TableNotFoundException(new SchemaTableName(databaseName, tableName))));
Table newTable = Table.builder(table)
.setOwner(Optional.of(principal.getName()))
.build();
delegate.alterTable(databaseName, tableName, toMetastoreApiTable(newTable));
}
@Override
public void commentColumn(String databaseName, String tableName, String columnName, Optional<String> comment)
{
io.trino.hive.thrift.metastore.Table table = delegate.getTable(databaseName, tableName)
.orElseThrow(() -> new TableNotFoundException(new SchemaTableName(databaseName, tableName)));
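// Only data columns are updated; partition key comments are left untouched.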
for (FieldSchema fieldSchema : table.getSd().getCols()) {
if (fieldSchema.getName().equals(columnName)) {
if (comment.isPresent()) {
fieldSchema.setComment(comment.get());
}
else {
fieldSchema.unsetComment();
}
}
}
alterTable(databaseName, tableName, table);
}
@Override
public void addColumn(String databaseName, String tableName, String columnName, HiveType columnType, String columnComment)
{
io.trino.hive.thrift.metastore.Table table = delegate.getTable(databaseName, tableName)
.orElseThrow(() -> new TableNotFoundException(new SchemaTableName(databaseName, tableName)));
table.getSd().getCols().add(
new FieldSchema(columnName, columnType.getHiveTypeName().toString(), columnComment));
alterTable(databaseName, tableName, table);
}
@Override
public void renameColumn(String databaseName, String tableName, String oldColumnName, String newColumnName)
{
io.trino.hive.thrift.metastore.Table table = delegate.getTable(databaseName, tableName)
.orElseThrow(() -> new TableNotFoundException(new SchemaTableName(databaseName, tableName)));
for (FieldSchema fieldSchema : table.getPartitionKeys()) {
if (fieldSchema.getName().equals(oldColumnName)) {
throw new TrinoException(NOT_SUPPORTED, "Renaming partition columns is not supported");
}
}
for (FieldSchema fieldSchema : table.getSd().getCols()) {
if (fieldSchema.getName().equals(oldColumnName)) {
fieldSchema.setName(newColumnName);
}
}
alterTable(databaseName, tableName, table);
}
@Override
public void dropColumn(String databaseName, String tableName, String columnName)
{
verifyCanDropColumn(this, databaseName, tableName, columnName);
io.trino.hive.thrift.metastore.Table table = delegate.getTable(databaseName, tableName)
.orElseThrow(() -> new TableNotFoundException(new SchemaTableName(databaseName, tableName)));
table.getSd().getCols().removeIf(fieldSchema -> fieldSchema.getName().equals(columnName));
alterTable(databaseName, tableName, table);
}
private void alterTable(String databaseName, String tableName, io.trino.hive.thrift.metastore.Table table)
{
delegate.alterTable(databaseName, tableName, table);
}
@Override
public Optional<Partition> getPartition(Table table, List<String> partitionValues)
{
return delegate.getPartition(table.getDatabaseName(), table.getTableName(), partitionValues).map(partition -> fromMetastoreApiPartition(table, partition));
}
@Override
public Optional<List<String>> getPartitionNamesByFilter(
String databaseName,
String tableName,
List<String> columnNames,
TupleDomain<String> partitionKeysFilter)
{
return delegate.getPartitionNamesByFilter(databaseName, tableName, columnNames, partitionKeysFilter);
}
@Override
public Map<String, Optional<Partition>> getPartitionsByNames(Table table, List<String> partitionNames)
{
requireNonNull(partitionNames, "partitionNames is null");
if (partitionNames.isEmpty()) {
return ImmutableMap.of();
}
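// Map each requested name to its partition values, fetch the partitions in one
// batched call, then re-key the results by the original name; names with no
// matching partition map to Optional.empty().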
Map<String, List<String>> partitionNameToPartitionValuesMap = partitionNames.stream()
.collect(Collectors.toMap(identity(), HiveUtil::toPartitionValues));
Map<List<String>, Partition> partitionValuesToPartitionMap = delegate.getPartitionsByNames(table.getDatabaseName(), table.getTableName(), partitionNames).stream()
.map(partition -> fromMetastoreApiPartition(table, partition))
.collect(Collectors.toMap(Partition::getValues, identity()));
ImmutableMap.Builder<String, Optional<Partition>> resultBuilder = ImmutableMap.builder();
for (Map.Entry<String, List<String>> entry : partitionNameToPartitionValuesMap.entrySet()) {
Partition partition = partitionValuesToPartitionMap.get(entry.getValue());
resultBuilder.put(entry.getKey(), Optional.ofNullable(partition));
}
return resultBuilder.buildOrThrow();
}
private static Partition fromMetastoreApiPartition(Table table, io.trino.hive.thrift.metastore.Partition partition)
{
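// As in getTable, an Avro table with an explicit schema supplies its own column
// list in place of the columns stored with the partition.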
if (isAvroTableWithSchemaSet(table)) {
List<FieldSchema> schema = table.getDataColumns().stream()
.map(ThriftMetastoreUtil::toMetastoreApiFieldSchema)
.collect(toImmutableList());
return ThriftMetastoreUtil.fromMetastoreApiPartition(partition, schema);
}
return ThriftMetastoreUtil.fromMetastoreApiPartition(partition);
}
@Override
public void addPartitions(String databaseName, String tableName, List<PartitionWithStatistics> partitions)
{
delegate.addPartitions(databaseName, tableName, partitions);
}
@Override
public void dropPartition(String databaseName, String tableName, List<String> parts, boolean deleteData)
{
delegate.dropPartition(databaseName, tableName, parts, deleteData);
}
@Override
public void alterPartition(String databaseName, String tableName, PartitionWithStatistics partition)
{
delegate.alterPartition(databaseName, tableName, partition);
}
@Override
public void createRole(String role, String grantor)
{
delegate.createRole(role, grantor);
}
@Override
public void dropRole(String role)
{
delegate.dropRole(role);
}
@Override
public Set<String> listRoles()
{
return delegate.listRoles();
}
@Override
public void grantRoles(Set<String> roles, Set<HivePrincipal> grantees, boolean adminOption, HivePrincipal grantor)
{
delegate.grantRoles(roles, grantees, adminOption, grantor);
}
@Override
public void revokeRoles(Set<String> roles, Set<HivePrincipal> grantees, boolean adminOption, HivePrincipal grantor)
{
delegate.revokeRoles(roles, grantees, adminOption, grantor);
}
@Override
public Set<HivePrincipal> listGrantedPrincipals(String role)
{
return delegate.listGrantedPrincipals(role);
}
@Override
public Set<RoleGrant> listRoleGrants(HivePrincipal principal)
{
return delegate.listRoleGrants(principal);
}
@Override
public void grantTablePrivileges(String databaseName, String tableName, String tableOwner, HivePrincipal grantee, HivePrincipal grantor, Set<HivePrivilege> privileges, boolean grantOption)
{
delegate.grantTablePrivileges(databaseName, tableName, tableOwner, grantee, grantor, privileges, grantOption);
}
@Override
public void revokeTablePrivileges(String databaseName, String tableName, String tableOwner, HivePrincipal grantee, HivePrincipal grantor, Set<HivePrivilege> privileges, boolean grantOption)
{
delegate.revokeTablePrivileges(databaseName, tableName, tableOwner, grantee, grantor, privileges, grantOption);
}
@Override
public Set<HivePrivilegeInfo> listTablePrivileges(String databaseName, String tableName, Optional<String> tableOwner, Optional<HivePrincipal> principal)
{
return delegate.listTablePrivileges(databaseName, tableName, tableOwner, principal);
}
@Override
public Optional<String> getConfigValue(String name)
{
return delegate.getConfigValue(name);
}
@Override
public void checkSupportsTransactions()
{
delegate.checkSupportsTransactions();
}
@Override
public long openTransaction(AcidTransactionOwner transactionOwner)
{
return delegate.openTransaction(transactionOwner);
}
@Override
public void commitTransaction(long transactionId)
{
delegate.commitTransaction(transactionId);
}
@Override
public void abortTransaction(long transactionId)
{
delegate.abortTransaction(transactionId);
}
@Override
public void sendTransactionHeartbeat(long transactionId)
{
delegate.sendTransactionHeartbeat(transactionId);
}
@Override
public void acquireSharedReadLock(
AcidTransactionOwner transactionOwner,
String queryId,
long transactionId,
List<SchemaTableName> fullTables,
List<HivePartition> partitions)
{
delegate.acquireSharedReadLock(transactionOwner, queryId, transactionId, fullTables, partitions);
}
@Override
public String getValidWriteIds(List<SchemaTableName> tables, long currentTransactionId)
{
return delegate.getValidWriteIds(tables, currentTransactionId);
}
@Override
public long allocateWriteId(String dbName, String tableName, long transactionId)
{
return delegate.allocateWriteId(dbName, tableName, transactionId);
}
@Override
public void acquireTableWriteLock(
AcidTransactionOwner transactionOwner,
String queryId,
long transactionId,
String dbName,
String tableName,
DataOperationType operation,
boolean isDynamicPartitionWrite)
{
delegate.acquireTableWriteLock(transactionOwner, queryId, transactionId, dbName, tableName, operation, isDynamicPartitionWrite);
}
@Override
public void updateTableWriteId(String dbName, String tableName, long transactionId, long writeId, OptionalLong rowCountChange)
{
delegate.updateTableWriteId(dbName, tableName, transactionId, writeId, rowCountChange);
}
@Override
public void alterPartitions(String dbName, String tableName, List<Partition> partitions, long writeId)
{
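// Stamp the ACID write ID on each converted partition before issuing the
// batched alter call.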
List<io.trino.hive.thrift.metastore.Partition> hadoopPartitions = partitions.stream()
.map(ThriftMetastoreUtil::toMetastoreApiPartition)
.peek(partition -> partition.setWriteId(writeId))
.collect(toImmutableList());
delegate.alterPartitions(dbName, tableName, hadoopPartitions, writeId);
}
@Override
public void addDynamicPartitions(String dbName, String tableName, List<String> partitionNames, long transactionId, long writeId, AcidOperation operation)
{
delegate.addDynamicPartitions(dbName, tableName, partitionNames, transactionId, writeId, operation);
}
@Override
public void alterTransactionalTable(Table table, long transactionId, long writeId, PrincipalPrivileges principalPrivileges)
{
delegate.alterTransactionalTable(toMetastoreApiTable(table, principalPrivileges), transactionId, writeId);
}
}