/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.trino.metastore;
import io.trino.metastore.HivePrivilegeInfo.HivePrivilege;
import io.trino.spi.TrinoException;
import io.trino.spi.connector.SchemaTableName;
import io.trino.spi.function.LanguageFunction;
import io.trino.spi.predicate.TupleDomain;
import io.trino.spi.security.RoleGrant;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.OptionalLong;
import java.util.Set;
import static io.trino.spi.StandardErrorCode.NOT_SUPPORTED;
public interface HiveMetastore
{
Optional getDatabase(String databaseName);
List getAllDatabases();
Optional
getTable(String databaseName, String tableName);
/**
* @param columnNames Must not be empty.
*/
Map getTableColumnStatistics(String databaseName, String tableName, Set columnNames);
/**
* @param columnNames Must not be empty.
*/
Map> getPartitionColumnStatistics(
String databaseName,
String tableName,
Set partitionNames,
Set columnNames);
/**
* If true, callers should inspect table and partition parameters for spark stats.
* This method really only exists for the ThriftHiveMetastore implementation. Spark mixes table and column statistics into the table parameters, and this breaks
* the abstractions of the metastore interface.
*/
default boolean useSparkTableStatistics()
{
return false;
}
void updateTableStatistics(String databaseName, String tableName, OptionalLong acidWriteId, StatisticsUpdateMode mode, PartitionStatistics statisticsUpdate);
void updatePartitionStatistics(Table table, StatisticsUpdateMode mode, Map partitionUpdates);
List getTables(String databaseName);
void createDatabase(Database database);
void dropDatabase(String databaseName, boolean deleteData);
void renameDatabase(String databaseName, String newDatabaseName);
void setDatabaseOwner(String databaseName, HivePrincipal principal);
void createTable(Table table, PrincipalPrivileges principalPrivileges);
void dropTable(String databaseName, String tableName, boolean deleteData);
/**
* This should only be used if the semantic here is to drop and add. Trying to
* alter one field of a table object previously acquired from getTable is
* probably not what you want.
*/
void replaceTable(String databaseName, String tableName, Table newTable, PrincipalPrivileges principalPrivileges);
void renameTable(String databaseName, String tableName, String newDatabaseName, String newTableName);
void commentTable(String databaseName, String tableName, Optional comment);
void setTableOwner(String databaseName, String tableName, HivePrincipal principal);
void commentColumn(String databaseName, String tableName, String columnName, Optional comment);
void addColumn(String databaseName, String tableName, String columnName, HiveType columnType, String columnComment);
void renameColumn(String databaseName, String tableName, String oldColumnName, String newColumnName);
void dropColumn(String databaseName, String tableName, String columnName);
Optional getPartition(Table table, List partitionValues);
/**
* Return a list of partition names, with optional filtering (hint to improve performance if possible).
*
* @param databaseName the name of the database
* @param tableName the name of the table
* @param columnNames the list of partition column names
* @param partitionKeysFilter optional filter for the partition column values
* @return a list of partition names as created by {@code MetastoreUtil#toPartitionName}
* @see TupleDomain
*/
Optional> getPartitionNamesByFilter(String databaseName, String tableName, List columnNames, TupleDomain partitionKeysFilter);
Map> getPartitionsByNames(Table table, List partitionNames);
void addPartitions(String databaseName, String tableName, List partitions);
void dropPartition(String databaseName, String tableName, List parts, boolean deleteData);
void alterPartition(String databaseName, String tableName, PartitionWithStatistics partition);
void createRole(String role, String grantor);
void dropRole(String role);
Set listRoles();
void grantRoles(Set roles, Set grantees, boolean adminOption, HivePrincipal grantor);
void revokeRoles(Set roles, Set grantees, boolean adminOption, HivePrincipal grantor);
Set listRoleGrants(HivePrincipal principal);
void grantTablePrivileges(String databaseName, String tableName, String tableOwner, HivePrincipal grantee, HivePrincipal grantor, Set privileges, boolean grantOption);
void revokeTablePrivileges(String databaseName, String tableName, String tableOwner, HivePrincipal grantee, HivePrincipal grantor, Set privileges, boolean grantOption);
/**
* @param principal when empty, all table privileges are returned
*/
Set listTablePrivileges(String databaseName, String tableName, Optional tableOwner, Optional principal);
default void checkSupportsTransactions()
{
throw new TrinoException(NOT_SUPPORTED, getClass().getSimpleName() + " does not support ACID tables");
}
default long openTransaction(AcidTransactionOwner transactionOwner)
{
throw new UnsupportedOperationException();
}
default void commitTransaction(long transactionId)
{
throw new UnsupportedOperationException();
}
default void abortTransaction(long transactionId)
{
throw new UnsupportedOperationException();
}
default void sendTransactionHeartbeat(long transactionId)
{
throw new UnsupportedOperationException();
}
default void acquireSharedReadLock(
AcidTransactionOwner transactionOwner,
String queryId,
long transactionId,
List fullTables,
List partitions)
{
throw new UnsupportedOperationException();
}
default String getValidWriteIds(List tables, long currentTransactionId)
{
throw new UnsupportedOperationException();
}
default Optional getConfigValue(String name)
{
return Optional.empty();
}
default long allocateWriteId(String dbName, String tableName, long transactionId)
{
throw new UnsupportedOperationException();
}
default void acquireTableWriteLock(
AcidTransactionOwner transactionOwner,
String queryId,
long transactionId,
String dbName,
String tableName,
AcidOperation operation,
boolean isDynamicPartitionWrite)
{
throw new UnsupportedOperationException();
}
default void updateTableWriteId(String dbName, String tableName, long transactionId, long writeId, OptionalLong rowCountChange)
{
throw new UnsupportedOperationException();
}
default void addDynamicPartitions(String dbName, String tableName, List partitionNames, long transactionId, long writeId, AcidOperation operation)
{
throw new UnsupportedOperationException();
}
default void alterTransactionalTable(Table table, long transactionId, long writeId, PrincipalPrivileges principalPrivileges)
{
throw new UnsupportedOperationException();
}
boolean functionExists(String databaseName, String functionName, String signatureToken);
Collection getAllFunctions(String databaseName);
Collection getFunctions(String databaseName, String functionName);
void createFunction(String databaseName, String functionName, LanguageFunction function);
void replaceFunction(String databaseName, String functionName, LanguageFunction function);
void dropFunction(String databaseName, String functionName, String signatureToken);
}