/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.facebook.presto.iceberg;
import com.facebook.airlift.json.JsonCodec;
import com.facebook.airlift.log.Logger;
import com.facebook.presto.common.predicate.TupleDomain;
import com.facebook.presto.common.type.TypeManager;
import com.facebook.presto.hive.HdfsContext;
import com.facebook.presto.hive.HdfsEnvironment;
import com.facebook.presto.hive.HiveWrittenPartitions;
import com.facebook.presto.hive.TableAlreadyExistsException;
import com.facebook.presto.hive.metastore.Database;
import com.facebook.presto.hive.metastore.ExtendedHiveMetastore;
import com.facebook.presto.hive.metastore.MetastoreContext;
import com.facebook.presto.hive.metastore.Table;
import com.facebook.presto.spi.ColumnHandle;
import com.facebook.presto.spi.ColumnMetadata;
import com.facebook.presto.spi.ConnectorInsertTableHandle;
import com.facebook.presto.spi.ConnectorNewTableLayout;
import com.facebook.presto.spi.ConnectorOutputTableHandle;
import com.facebook.presto.spi.ConnectorSession;
import com.facebook.presto.spi.ConnectorTableHandle;
import com.facebook.presto.spi.ConnectorTableLayout;
import com.facebook.presto.spi.ConnectorTableLayoutHandle;
import com.facebook.presto.spi.ConnectorTableLayoutResult;
import com.facebook.presto.spi.ConnectorTableMetadata;
import com.facebook.presto.spi.Constraint;
import com.facebook.presto.spi.PrestoException;
import com.facebook.presto.spi.SchemaNotFoundException;
import com.facebook.presto.spi.SchemaTableName;
import com.facebook.presto.spi.SchemaTablePrefix;
import com.facebook.presto.spi.SystemTable;
import com.facebook.presto.spi.TableNotFoundException;
import com.facebook.presto.spi.connector.ConnectorMetadata;
import com.facebook.presto.spi.connector.ConnectorOutputMetadata;
import com.facebook.presto.spi.statistics.ComputedStatistics;
import com.facebook.presto.spi.statistics.TableStatistics;
import com.google.common.base.VerifyException;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import io.airlift.slice.Slice;
import org.apache.hadoop.fs.Path;
import org.apache.iceberg.AppendFiles;
import org.apache.iceberg.DataFiles;
import org.apache.iceberg.FileFormat;
import org.apache.iceberg.PartitionSpec;
import org.apache.iceberg.PartitionSpecParser;
import org.apache.iceberg.Schema;
import org.apache.iceberg.SchemaParser;
import org.apache.iceberg.Snapshot;
import org.apache.iceberg.TableMetadata;
import org.apache.iceberg.TableOperations;
import org.apache.iceberg.Transaction;
import org.apache.iceberg.types.Type;
import org.apache.iceberg.types.TypeUtil;
import org.apache.iceberg.types.Types;
import org.apache.iceberg.types.Types.NestedField;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;
import static com.facebook.presto.common.type.BigintType.BIGINT;
import static com.facebook.presto.hive.HiveMetadata.TABLE_COMMENT;
import static com.facebook.presto.iceberg.IcebergSchemaProperties.getSchemaLocation;
import static com.facebook.presto.iceberg.IcebergTableProperties.FILE_FORMAT_PROPERTY;
import static com.facebook.presto.iceberg.IcebergTableProperties.PARTITIONING_PROPERTY;
import static com.facebook.presto.iceberg.IcebergTableProperties.getFileFormat;
import static com.facebook.presto.iceberg.IcebergTableProperties.getPartitioning;
import static com.facebook.presto.iceberg.IcebergTableProperties.getTableLocation;
import static com.facebook.presto.iceberg.IcebergUtil.getColumns;
import static com.facebook.presto.iceberg.IcebergUtil.getDataPath;
import static com.facebook.presto.iceberg.IcebergUtil.getFileFormat;
import static com.facebook.presto.iceberg.IcebergUtil.getIcebergTable;
import static com.facebook.presto.iceberg.IcebergUtil.getTableComment;
import static com.facebook.presto.iceberg.IcebergUtil.isIcebergTable;
import static com.facebook.presto.iceberg.PartitionFields.parsePartitionFields;
import static com.facebook.presto.iceberg.PartitionFields.toPartitionFields;
import static com.facebook.presto.iceberg.TableType.DATA;
import static com.facebook.presto.iceberg.TypeConverter.toIcebergType;
import static com.facebook.presto.iceberg.TypeConverter.toPrestoType;
import static com.facebook.presto.spi.StandardErrorCode.INVALID_SCHEMA_PROPERTY;
import static com.facebook.presto.spi.StandardErrorCode.NOT_SUPPORTED;
import static com.facebook.presto.spi.StandardErrorCode.SCHEMA_NOT_EMPTY;
import static com.facebook.presto.spi.security.PrincipalType.USER;
import static com.google.common.base.Verify.verify;
import static com.google.common.collect.ImmutableList.toImmutableList;
import static com.google.common.collect.ImmutableMap.toImmutableMap;
import static java.util.Objects.requireNonNull;
import static java.util.function.Function.identity;
import static java.util.stream.Collectors.toList;
import static org.apache.iceberg.TableMetadata.newTableMetadata;
import static org.apache.iceberg.TableProperties.DEFAULT_FILE_FORMAT;
import static org.apache.iceberg.Transactions.createTableTransaction;
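/**
 * ConnectorMetadata implementation for the Iceberg connector: table and schema definitions are
 * resolved through the Hive metastore (ExtendedHiveMetastore), while data layout, snapshots and
 * writes are handled through the Iceberg table API (transactions, partition specs, appends).
 */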
public class IcebergMetadata
implements ConnectorMetadata
{
private static final Logger log = Logger.get(IcebergMetadata.class);
private final ExtendedHiveMetastore metastore;
private final HdfsEnvironment hdfsEnvironment;
private final TypeManager typeManager;
private final JsonCodec<CommitTaskData> commitTaskCodec;
private final Map<String, Optional<Long>> snapshotIds = new ConcurrentHashMap<>();
private Transaction transaction;
public IcebergMetadata(
ExtendedHiveMetastore metastore,
HdfsEnvironment hdfsEnvironment,
TypeManager typeManager,
JsonCodec<CommitTaskData> commitTaskCodec)
{
this.metastore = requireNonNull(metastore, "metastore is null");
this.hdfsEnvironment = requireNonNull(hdfsEnvironment, "hdfsEnvironment is null");
this.typeManager = requireNonNull(typeManager, "typeManager is null");
this.commitTaskCodec = requireNonNull(commitTaskCodec, "commitTaskCodec is null");
}
@Override
public List<String> listSchemaNames(ConnectorSession session)
{
MetastoreContext metastoreContext = new MetastoreContext(session.getIdentity(), session.getQueryId(), session.getClientInfo(), session.getSource());
return metastore.getAllDatabases(metastoreContext);
}
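// Returning null (rather than throwing) signals a missing table so the engine can raise the
// standard table-not-found error; tables present in the metastore but not backed by Iceberg
// are rejected with UnknownTableTypeException.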
@Override
public IcebergTableHandle getTableHandle(ConnectorSession session, SchemaTableName tableName)
{
IcebergTableName name = IcebergTableName.from(tableName.getTableName());
verify(name.getTableType() == DATA, "Wrong table type: " + name.getTableType());
MetastoreContext metastoreContext = new MetastoreContext(session.getIdentity(), session.getQueryId(), session.getClientInfo(), session.getSource());
Optional<Table> hiveTable = metastore.getTable(metastoreContext, tableName.getSchemaName(), name.getTableName());
if (!hiveTable.isPresent()) {
return null;
}
if (!isIcebergTable(hiveTable.get())) {
throw new UnknownTableTypeException(tableName);
}
org.apache.iceberg.Table table = getIcebergTable(metastore, hdfsEnvironment, session, tableName);
Optional<Long> snapshotId = getSnapshotId(table, name.getSnapshotId());
return new IcebergTableHandle(
tableName.getSchemaName(),
name.getTableName(),
name.getTableType(),
snapshotId,
TupleDomain.all());
}
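// The constraint summary is captured in the layout handle; predicate pruning presumably
// happens later, when splits are generated from that handle.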
@Override
public List<ConnectorTableLayoutResult> getTableLayouts(ConnectorSession session, ConnectorTableHandle table, Constraint<ColumnHandle> constraint, Optional<Set<ColumnHandle>> desiredColumns)
{
IcebergTableHandle handle = (IcebergTableHandle) table;
ConnectorTableLayout layout = new ConnectorTableLayout(new IcebergTableLayoutHandle(handle, constraint.getSummary()));
return ImmutableList.of(new ConnectorTableLayoutResult(layout, constraint.getSummary()));
}
@Override
public ConnectorTableLayout getTableLayout(ConnectorSession session, ConnectorTableLayoutHandle handle)
{
return new ConnectorTableLayout(handle);
}
@Override
public Optional<SystemTable> getSystemTable(ConnectorSession session, SchemaTableName tableName)
{
return getRawSystemTable(session, tableName);
}
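// System tables are addressed with a suffix parsed by IcebergTableName (e.g. "orders$snapshots",
// assuming the usual '$' suffix syntax): $history, $snapshots, $partitions and $manifests each map
// to a dedicated SystemTable implementation; plain data tables fall through to Optional.empty().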
private Optional<SystemTable> getRawSystemTable(ConnectorSession session, SchemaTableName tableName)
{
IcebergTableName name = IcebergTableName.from(tableName.getTableName());
MetastoreContext metastoreContext = new MetastoreContext(session.getIdentity(), session.getQueryId(), session.getClientInfo(), session.getSource());
Optional<Table> hiveTable = metastore.getTable(metastoreContext, tableName.getSchemaName(), name.getTableName());
if (!hiveTable.isPresent() || !isIcebergTable(hiveTable.get())) {
return Optional.empty();
}
org.apache.iceberg.Table table = getIcebergTable(metastore, hdfsEnvironment, session, tableName);
SchemaTableName systemTableName = new SchemaTableName(tableName.getSchemaName(), name.getTableNameWithType());
switch (name.getTableType()) {
case DATA:
break;
case HISTORY:
if (name.getSnapshotId().isPresent()) {
throw new PrestoException(NOT_SUPPORTED, "Snapshot ID not supported for history table: " + systemTableName);
}
return Optional.of(new HistoryTable(systemTableName, table));
case SNAPSHOTS:
if (name.getSnapshotId().isPresent()) {
throw new PrestoException(NOT_SUPPORTED, "Snapshot ID not supported for snapshots table: " + systemTableName);
}
return Optional.of(new SnapshotsTable(systemTableName, typeManager, table));
case PARTITIONS:
return Optional.of(new PartitionTable(systemTableName, typeManager, table, getSnapshotId(table, name.getSnapshotId())));
case MANIFESTS:
return Optional.of(new ManifestsTable(systemTableName, table, getSnapshotId(table, name.getSnapshotId())));
}
return Optional.empty();
}
@Override
public ConnectorTableMetadata getTableMetadata(ConnectorSession session, ConnectorTableHandle table)
{
return getTableMetadata(session, ((IcebergTableHandle) table).getSchemaTableName());
}
@Override
public List<SchemaTableName> listTables(ConnectorSession session, Optional<String> schemaName)
{
MetastoreContext metastoreContext = new MetastoreContext(session.getIdentity(), session.getQueryId(), session.getClientInfo(), session.getSource());
return metastore
.getAllTables(metastoreContext, schemaName.get())
.orElseGet(() -> metastore.getAllDatabases(metastoreContext))
.stream()
.map(table -> new SchemaTableName(schemaName.get(), table))
.collect(toList());
}
@Override
public Map<String, ColumnHandle> getColumnHandles(ConnectorSession session, ConnectorTableHandle tableHandle)
{
IcebergTableHandle table = (IcebergTableHandle) tableHandle;
org.apache.iceberg.Table icebergTable = getIcebergTable(metastore, hdfsEnvironment, session, table.getSchemaTableName());
return getColumns(icebergTable.schema(), typeManager).stream()
.collect(toImmutableMap(IcebergColumnHandle::getName, identity()));
}
@Override
public ColumnMetadata getColumnMetadata(ConnectorSession session, ConnectorTableHandle tableHandle, ColumnHandle columnHandle)
{
IcebergColumnHandle column = (IcebergColumnHandle) columnHandle;
return new ColumnMetadata(column.getName(), column.getType(), column.getComment().orElse(""), false);
}
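// Tables that disappear, or turn out not to be Iceberg tables, while the listing is in progress
// are logged and skipped instead of failing the whole listing.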
@Override
public Map<SchemaTableName, List<ColumnMetadata>> listTableColumns(ConnectorSession session, SchemaTablePrefix prefix)
{
List<SchemaTableName> tables = listTables(session, Optional.of(prefix.getSchemaName()));
ImmutableMap.Builder<SchemaTableName, List<ColumnMetadata>> columns = ImmutableMap.builder();
for (SchemaTableName table : tables) {
try {
columns.put(table, getTableMetadata(session, table).getColumns());
}
catch (TableNotFoundException e) {
log.warn(String.format("table disappeared during listing operation: %s", e.getMessage()));
}
catch (UnknownTableTypeException e) {
log.warn(String.format("%s: Unknown table type of table %s", e.getMessage(), table.getTableName()));
}
}
return columns.build();
}
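// The optional schema "location" property is validated by resolving a FileSystem for it up front,
// so an invalid URI fails CREATE SCHEMA immediately rather than surfacing on a later write.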
@Override
public void createSchema(ConnectorSession session, String schemaName, Map<String, Object> properties)
{
Optional<String> location = getSchemaLocation(properties).map(uri -> {
try {
hdfsEnvironment.getFileSystem(new HdfsContext(session, schemaName), new Path(uri));
}
catch (IOException | IllegalArgumentException e) {
throw new PrestoException(INVALID_SCHEMA_PROPERTY, "Invalid location URI: " + uri, e);
}
return uri;
});
Database database = Database.builder()
.setDatabaseName(schemaName)
.setLocation(location)
.setOwnerType(USER)
.setOwnerName(session.getUser())
.build();
MetastoreContext metastoreContext = new MetastoreContext(session.getIdentity(), session.getQueryId(), session.getClientInfo(), session.getSource());
metastore.createDatabase(metastoreContext, database);
}
@Override
public void dropSchema(ConnectorSession session, String schemaName)
{
// basic sanity check to provide a better error message
if (!listTables(session, Optional.of(schemaName)).isEmpty() ||
!listViews(session, Optional.of(schemaName)).isEmpty()) {
throw new PrestoException(SCHEMA_NOT_EMPTY, "Schema not empty: " + schemaName);
}
MetastoreContext metastoreContext = new MetastoreContext(session.getIdentity(), session.getQueryId(), session.getClientInfo(), session.getSource());
metastore.dropDatabase(metastoreContext, schemaName);
}
@Override
public void renameSchema(ConnectorSession session, String source, String target)
{
MetastoreContext metastoreContext = new MetastoreContext(session.getIdentity(), session.getQueryId(), session.getClientInfo(), session.getSource());
metastore.renameDatabase(metastoreContext, source, target);
}
@Override
public void createTable(ConnectorSession session, ConnectorTableMetadata tableMetadata, boolean ignoreExisting)
{
Optional<ConnectorNewTableLayout> layout = getNewTableLayout(session, tableMetadata);
finishCreateTable(session, beginCreateTable(session, tableMetadata, layout), ImmutableList.of(), ImmutableList.of());
}
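// CREATE TABLE (and CTAS) runs through an Iceberg transaction: Presto columns and the
// "partitioning" property are converted to an Iceberg Schema and PartitionSpec, the table
// location defaults to "<database location>/<table name>" when no explicit location is given,
// and nothing is committed until finishCreateTable/finishInsert.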
@Override
public ConnectorOutputTableHandle beginCreateTable(ConnectorSession session, ConnectorTableMetadata tableMetadata, Optional<ConnectorNewTableLayout> layout)
{
SchemaTableName schemaTableName = tableMetadata.getTable();
String schemaName = schemaTableName.getSchemaName();
String tableName = schemaTableName.getTableName();
Schema schema = toIcebergSchema(tableMetadata.getColumns());
PartitionSpec partitionSpec = parsePartitionFields(schema, getPartitioning(tableMetadata.getProperties()));
MetastoreContext metastoreContext = new MetastoreContext(session.getIdentity(), session.getQueryId(), session.getClientInfo(), session.getSource());
Database database = metastore.getDatabase(metastoreContext, schemaName)
.orElseThrow(() -> new SchemaNotFoundException(schemaName));
HdfsContext hdfsContext = new HdfsContext(session, schemaName, tableName);
String targetPath = getTableLocation(tableMetadata.getProperties());
if (targetPath == null) {
Optional<String> location = database.getLocation();
if (!location.isPresent() || location.get().isEmpty()) {
throw new PrestoException(NOT_SUPPORTED, "Database " + schemaName + " location is not set");
}
Path databasePath = new Path(location.get());
Path resultPath = new Path(databasePath, tableName);
targetPath = resultPath.toString();
}
TableOperations operations = new HiveTableOperations(
metastore,
new MetastoreContext(session.getIdentity(), session.getQueryId(), session.getClientInfo(), session.getSource()),
hdfsEnvironment,
hdfsContext,
schemaName,
tableName,
session.getUser(),
targetPath);
if (operations.current() != null) {
throw new TableAlreadyExistsException(schemaTableName);
}
ImmutableMap.Builder<String, String> propertiesBuilder = ImmutableMap.builderWithExpectedSize(2);
FileFormat fileFormat = getFileFormat(tableMetadata.getProperties());
propertiesBuilder.put(DEFAULT_FILE_FORMAT, fileFormat.toString());
if (tableMetadata.getComment().isPresent()) {
propertiesBuilder.put(TABLE_COMMENT, tableMetadata.getComment().get());
}
TableMetadata metadata = newTableMetadata(schema, partitionSpec, targetPath, propertiesBuilder.build());
transaction = createTableTransaction(tableName, operations, metadata);
return new IcebergWritableTableHandle(
schemaName,
tableName,
SchemaParser.toJson(metadata.schema()),
PartitionSpecParser.toJson(metadata.spec()),
getColumns(metadata.schema(), typeManager),
targetPath,
fileFormat);
}
@Override
public Optional<ConnectorOutputMetadata> finishCreateTable(ConnectorSession session, ConnectorOutputTableHandle tableHandle, Collection<Slice> fragments, Collection<ComputedStatistics> computedStatistics)
{
return finishInsert(session, (IcebergWritableTableHandle) tableHandle, fragments, computedStatistics);
}
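// Starts an Iceberg transaction on the target table and passes the schema, partition spec
// (as JSON), column handles, data path and file format to the workers via the writable handle.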
@Override
public ConnectorInsertTableHandle beginInsert(ConnectorSession session, ConnectorTableHandle tableHandle)
{
IcebergTableHandle table = (IcebergTableHandle) tableHandle;
org.apache.iceberg.Table icebergTable = getIcebergTable(metastore, hdfsEnvironment, session, table.getSchemaTableName());
transaction = icebergTable.newTransaction();
return new IcebergWritableTableHandle(
table.getSchemaName(),
table.getTableName(),
SchemaParser.toJson(icebergTable.schema()),
PartitionSpecParser.toJson(icebergTable.spec()),
getColumns(icebergTable.schema(), typeManager),
getDataPath(icebergTable.location()),
getFileFormat(icebergTable));
}
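// Workers send back JSON-encoded CommitTaskData fragments describing the files they wrote;
// each becomes an Iceberg DataFile (with partition data when the table is partitioned),
// all files are added via a fast append, and the transaction commits as a single snapshot.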
@Override
public Optional<ConnectorOutputMetadata> finishInsert(ConnectorSession session, ConnectorInsertTableHandle insertHandle, Collection<Slice> fragments, Collection<ComputedStatistics> computedStatistics)
{
IcebergWritableTableHandle table = (IcebergWritableTableHandle) insertHandle;
org.apache.iceberg.Table icebergTable = transaction.table();
List<CommitTaskData> commitTasks = fragments.stream()
.map(slice -> commitTaskCodec.fromJson(slice.getBytes()))
.collect(toImmutableList());
Type[] partitionColumnTypes = icebergTable.spec().fields().stream()
.map(field -> field.transform().getResultType(
icebergTable.schema().findType(field.sourceId())))
.toArray(Type[]::new);
AppendFiles appendFiles = transaction.newFastAppend();
for (CommitTaskData task : commitTasks) {
HdfsContext context = new HdfsContext(session, table.getSchemaName(), table.getTableName());
DataFiles.Builder builder = DataFiles.builder(icebergTable.spec())
.withInputFile(new HdfsInputFile(new Path(task.getPath()), hdfsEnvironment, context))
.withFormat(table.getFileFormat())
.withMetrics(task.getMetrics().metrics());
if (!icebergTable.spec().fields().isEmpty()) {
String partitionDataJson = task.getPartitionDataJson()
.orElseThrow(() -> new VerifyException("No partition data for partitioned table"));
builder.withPartition(PartitionData.fromJson(partitionDataJson, partitionColumnTypes));
}
appendFiles.appendFile(builder.build());
}
appendFiles.commit();
transaction.commitTransaction();
return Optional.of(new HiveWrittenPartitions(commitTasks.stream()
.map(CommitTaskData::getPath)
.collect(toImmutableList())));
}
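// Row-id handle used by the engine when planning row-level deletes; the id 0 and the "$row_id"
// name appear to be placeholders, since beginDelete below rejects anything but whole-partition deletes.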
@Override
public ColumnHandle getUpdateRowIdColumnHandle(ConnectorSession session, ConnectorTableHandle tableHandle)
{
return new IcebergColumnHandle(0, "$row_id", BIGINT, Optional.empty());
}
@Override
public void dropTable(ConnectorSession session, ConnectorTableHandle tableHandle)
{
IcebergTableHandle handle = (IcebergTableHandle) tableHandle;
MetastoreContext metastoreContext = new MetastoreContext(session.getIdentity(), session.getQueryId(), session.getClientInfo(), session.getSource());
metastore.dropTable(metastoreContext, handle.getSchemaName(), handle.getTableName(), true);
}
@Override
public void renameTable(ConnectorSession session, ConnectorTableHandle tableHandle, SchemaTableName newTable)
{
IcebergTableHandle handle = (IcebergTableHandle) tableHandle;
MetastoreContext metastoreContext = new MetastoreContext(session.getIdentity(), session.getQueryId(), session.getClientInfo(), session.getSource());
metastore.renameTable(metastoreContext, handle.getSchemaName(), handle.getTableName(), newTable.getSchemaName(), newTable.getTableName());
}
@Override
public void addColumn(ConnectorSession session, ConnectorTableHandle tableHandle, ColumnMetadata column)
{
IcebergTableHandle handle = (IcebergTableHandle) tableHandle;
org.apache.iceberg.Table icebergTable = getIcebergTable(metastore, hdfsEnvironment, session, handle.getSchemaTableName());
icebergTable.updateSchema().addColumn(column.getName(), toIcebergType(column.getType())).commit();
}
@Override
public void dropColumn(ConnectorSession session, ConnectorTableHandle tableHandle, ColumnHandle column)
{
IcebergTableHandle icebergTableHandle = (IcebergTableHandle) tableHandle;
IcebergColumnHandle handle = (IcebergColumnHandle) column;
org.apache.iceberg.Table icebergTable = getIcebergTable(metastore, hdfsEnvironment, session, icebergTableHandle.getSchemaTableName());
icebergTable.updateSchema().deleteColumn(handle.getName()).commit();
}
@Override
public void renameColumn(ConnectorSession session, ConnectorTableHandle tableHandle, ColumnHandle source, String target)
{
IcebergTableHandle icebergTableHandle = (IcebergTableHandle) tableHandle;
IcebergColumnHandle columnHandle = (IcebergColumnHandle) source;
org.apache.iceberg.Table icebergTable = getIcebergTable(metastore, hdfsEnvironment, session, icebergTableHandle.getSchemaTableName());
icebergTable.updateSchema().renameColumn(columnHandle.getName(), target).commit();
}
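// Rebuilds ConnectorTableMetadata from the live Iceberg table: column metadata from the Iceberg
// schema plus the file format property and, for partitioned tables, the partitioning property.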
private ConnectorTableMetadata getTableMetadata(ConnectorSession session, SchemaTableName table)
{
MetastoreContext metastoreContext = new MetastoreContext(session.getIdentity(), session.getQueryId(), session.getClientInfo(), session.getSource());
if (!metastore.getTable(metastoreContext, table.getSchemaName(), table.getTableName()).isPresent()) {
throw new TableNotFoundException(table);
}
org.apache.iceberg.Table icebergTable = getIcebergTable(metastore, hdfsEnvironment, session, table);
List<ColumnMetadata> columns = getColumnMetadatas(icebergTable);
ImmutableMap.Builder<String, Object> properties = ImmutableMap.builder();
properties.put(FILE_FORMAT_PROPERTY, getFileFormat(icebergTable));
if (!icebergTable.spec().fields().isEmpty()) {
properties.put(PARTITIONING_PROPERTY, toPartitionFields(icebergTable.spec()));
}
return new ConnectorTableMetadata(table, columns, properties.build(), getTableComment(icebergTable));
}
private List<ColumnMetadata> getColumnMetadatas(org.apache.iceberg.Table table)
{
return table.schema().columns().stream()
.map(column -> {
return new ColumnMetadata(column.name(), toPrestoType(column.type(), typeManager), column.doc(), false);
})
.collect(toImmutableList());
}
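// Converts Presto column metadata into an Iceberg Schema: hidden columns are skipped, fields are
// created with provisional positional ids, and TypeUtil.assignFreshIds then re-assigns unique ids
// (starting at 1) across the whole structure, including nested fields.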
private static Schema toIcebergSchema(List<ColumnMetadata> columns)
{
List<NestedField> icebergColumns = new ArrayList<>();
for (ColumnMetadata column : columns) {
if (!column.isHidden()) {
int index = icebergColumns.size();
Type type = toIcebergType(column.getType());
NestedField field = column.isNullable()
? NestedField.optional(index, column.getName(), type, column.getComment())
: NestedField.required(index, column.getName(), type, column.getComment());
icebergColumns.add(field);
}
}
Type icebergSchema = Types.StructType.of(icebergColumns);
AtomicInteger nextFieldId = new AtomicInteger(1);
icebergSchema = TypeUtil.assignFreshIds(icebergSchema, nextFieldId::getAndIncrement);
return new Schema(icebergSchema.asStructType().fields());
}
@Override
public ConnectorTableHandle beginDelete(ConnectorSession session, ConnectorTableHandle tableHandle)
{
throw new PrestoException(NOT_SUPPORTED, "This connector only supports delete where one or more partitions are deleted entirely");
}
public ExtendedHiveMetastore getMetastore()
{
return metastore;
}
public void rollback()
{
// TODO: cleanup open transaction
}
@Override
public TableStatistics getTableStatistics(ConnectorSession session, ConnectorTableHandle tableHandle, Optional<ConnectorTableLayoutHandle> tableLayoutHandle, List<ColumnHandle> columnHandles, Constraint<ColumnHandle> constraint)
{
IcebergTableHandle handle = (IcebergTableHandle) tableHandle;
org.apache.iceberg.Table icebergTable = getIcebergTable(metastore, hdfsEnvironment, session, handle.getSchemaTableName());
return TableStatisticsMaker.getTableStatistics(typeManager, constraint, handle, icebergTable);
}
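// Resolves an explicitly requested snapshot id through IcebergUtil.resolveSnapshotId; otherwise
// falls back to the table's current snapshot, or empty for a table with no snapshots yet.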
private Optional<Long> getSnapshotId(org.apache.iceberg.Table table, Optional<Long> snapshotId)
{
if (snapshotId.isPresent()) {
return Optional.of(IcebergUtil.resolveSnapshotId(table, snapshotId.get()));
}
return Optional.ofNullable(table.currentSnapshot()).map(Snapshot::snapshotId);
}
}