io.trino.plugin.hive.parquet.ParquetPageSourceFactory (trino-hive)
This is a Databricks build of Trino's Hive plugin which includes support for HTTP-based transport
for its Hive metastore Thrift interface.
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.trino.plugin.hive.parquet;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import io.trino.filesystem.TrinoFileSystem;
import io.trino.filesystem.TrinoFileSystemFactory;
import io.trino.filesystem.TrinoInputFile;
import io.trino.parquet.BloomFilterStore;
import io.trino.parquet.ParquetCorruptionException;
import io.trino.parquet.ParquetDataSource;
import io.trino.parquet.ParquetDataSourceId;
import io.trino.parquet.ParquetReaderOptions;
import io.trino.parquet.ParquetWriteValidation;
import io.trino.parquet.predicate.TupleDomainParquetPredicate;
import io.trino.parquet.reader.MetadataReader;
import io.trino.parquet.reader.ParquetReader;
import io.trino.parquet.reader.ParquetReaderColumn;
import io.trino.parquet.reader.TrinoColumnIndexStore;
import io.trino.plugin.hive.AcidInfo;
import io.trino.plugin.hive.FileFormatDataSourceStats;
import io.trino.plugin.hive.HiveColumnHandle;
import io.trino.plugin.hive.HiveConfig;
import io.trino.plugin.hive.HivePageSourceFactory;
import io.trino.plugin.hive.HiveType;
import io.trino.plugin.hive.ReaderColumns;
import io.trino.plugin.hive.ReaderPageSource;
import io.trino.plugin.hive.acid.AcidTransaction;
import io.trino.spi.TrinoException;
import io.trino.spi.connector.ConnectorPageSource;
import io.trino.spi.connector.ConnectorSession;
import io.trino.spi.predicate.Domain;
import io.trino.spi.predicate.TupleDomain;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.BlockMissingException;
import org.apache.parquet.column.ColumnDescriptor;
import org.apache.parquet.hadoop.metadata.BlockMetaData;
import org.apache.parquet.hadoop.metadata.ColumnChunkMetaData;
import org.apache.parquet.hadoop.metadata.ColumnPath;
import org.apache.parquet.hadoop.metadata.FileMetaData;
import org.apache.parquet.hadoop.metadata.ParquetMetadata;
import org.apache.parquet.internal.filter2.columnindex.ColumnIndexStore;
import org.apache.parquet.io.MessageColumnIO;
import org.apache.parquet.schema.GroupType;
import org.apache.parquet.schema.MessageType;
import org.joda.time.DateTimeZone;
import javax.inject.Inject;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Optional;
import java.util.OptionalInt;
import java.util.Properties;
import java.util.Set;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Strings.nullToEmpty;
import static com.google.common.collect.ImmutableList.toImmutableList;
import static com.google.common.collect.ImmutableSet.toImmutableSet;
import static io.trino.memory.context.AggregatedMemoryContext.newSimpleAggregatedMemoryContext;
import static io.trino.parquet.ParquetTypeUtils.constructField;
import static io.trino.parquet.ParquetTypeUtils.getColumnIO;
import static io.trino.parquet.ParquetTypeUtils.getDescriptors;
import static io.trino.parquet.ParquetTypeUtils.getParquetTypeByName;
import static io.trino.parquet.ParquetTypeUtils.lookupColumnByName;
import static io.trino.parquet.predicate.PredicateUtils.buildPredicate;
import static io.trino.parquet.predicate.PredicateUtils.predicateMatches;
import static io.trino.parquet.reader.ParquetReaderColumn.getParquetReaderFields;
import static io.trino.plugin.hive.HiveColumnHandle.ColumnType.REGULAR;
import static io.trino.plugin.hive.HiveErrorCode.HIVE_BAD_DATA;
import static io.trino.plugin.hive.HiveErrorCode.HIVE_CANNOT_OPEN_SPLIT;
import static io.trino.plugin.hive.HiveErrorCode.HIVE_MISSING_DATA;
import static io.trino.plugin.hive.HivePageSourceProvider.projectBaseColumns;
import static io.trino.plugin.hive.HivePageSourceProvider.projectSufficientColumns;
import static io.trino.plugin.hive.HiveSessionProperties.getParquetMaxReadBlockRowCount;
import static io.trino.plugin.hive.HiveSessionProperties.getParquetMaxReadBlockSize;
import static io.trino.plugin.hive.HiveSessionProperties.isParquetIgnoreStatistics;
import static io.trino.plugin.hive.HiveSessionProperties.isParquetOptimizedNestedReaderEnabled;
import static io.trino.plugin.hive.HiveSessionProperties.isParquetOptimizedReaderEnabled;
import static io.trino.plugin.hive.HiveSessionProperties.isParquetUseColumnIndex;
import static io.trino.plugin.hive.HiveSessionProperties.isUseParquetColumnNames;
import static io.trino.plugin.hive.HiveSessionProperties.useParquetBloomFilter;
import static io.trino.plugin.hive.parquet.ParquetPageSource.handleException;
import static io.trino.plugin.hive.type.Category.PRIMITIVE;
import static io.trino.plugin.hive.util.HiveClassNames.PARQUET_HIVE_SERDE_CLASS;
import static io.trino.plugin.hive.util.HiveUtil.getDeserializerClassName;
import static io.trino.plugin.hive.util.SerdeConstants.SERIALIZATION_LIB;
import static io.trino.spi.type.BigintType.BIGINT;
import static java.lang.String.format;
import static java.util.Objects.requireNonNull;
import static java.util.stream.Collectors.toUnmodifiableList;
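/**
* {@link HivePageSourceFactory} for Hive splits stored as Parquet files.
*
* <p>The static {@code createPageSource} helper below is also usable by other callers. A minimal
* usage sketch, assuming the caller already has a {@code TrinoInputFile} and a list of
* {@code HiveColumnHandle}s (the variable names and the compaction threshold are placeholders,
* not values defined by this class):
* <pre>{@code
* ReaderPageSource readerPageSource = ParquetPageSourceFactory.createPageSource(
*         inputFile,                       // TrinoInputFile pointing at the Parquet file
*         0,                               // split start
*         fileLength,                      // split length (here: the whole file)
*         columns,                         // List<HiveColumnHandle> to read
*         TupleDomain.all(),               // no predicate pushdown
*         true,                            // match columns by name rather than by index
*         DateTimeZone.UTC,                // time zone for reading legacy timestamps
*         new FileFormatDataSourceStats(),
*         new ParquetReaderOptions(),
*         Optional.empty(),                // no write validation
*         100);                            // domain compaction threshold (assumed value)
* ConnectorPageSource pageSource = readerPageSource.get();
* }</pre>
*/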
public class ParquetPageSourceFactory
implements HivePageSourceFactory
{
/**
* If this object is passed as one of the columns for {@code createPageSource},
* it will be populated as an additional column containing the index of each
* row read.
*/
public static final HiveColumnHandle PARQUET_ROW_INDEX_COLUMN = new HiveColumnHandle(
"$parquet$row_index",
-1, // no real column index
HiveType.HIVE_LONG,
BIGINT,
Optional.empty(),
HiveColumnHandle.ColumnType.SYNTHESIZED,
Optional.empty());
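// SerDe class names that identify a table or partition as Parquet-backed; splits using any other SerDe are skipped by this factory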
private static final Set<String> PARQUET_SERDE_CLASS_NAMES = ImmutableSet.<String>builder()
.add(PARQUET_HIVE_SERDE_CLASS)
.add("parquet.hive.serde.ParquetHiveSerDe")
.build();
private final TrinoFileSystemFactory fileSystemFactory;
private final FileFormatDataSourceStats stats;
private final ParquetReaderOptions options;
private final DateTimeZone timeZone;
private final int domainCompactionThreshold;
@Inject
public ParquetPageSourceFactory(
TrinoFileSystemFactory fileSystemFactory,
FileFormatDataSourceStats stats,
ParquetReaderConfig config,
HiveConfig hiveConfig)
{
this.fileSystemFactory = requireNonNull(fileSystemFactory, "fileSystemFactory is null");
this.stats = requireNonNull(stats, "stats is null");
options = config.toParquetReaderOptions();
timeZone = hiveConfig.getParquetDateTimeZone();
domainCompactionThreshold = hiveConfig.getDomainCompactionThreshold();
}
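/**
* For Parquet tables only the serialization library property is needed downstream,
* so all other schema properties are dropped; non-Parquet schemas are returned unchanged.
*/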
public static Properties stripUnnecessaryProperties(Properties schema)
{
if (PARQUET_SERDE_CLASS_NAMES.contains(getDeserializerClassName(schema))) {
Properties stripped = new Properties();
stripped.put(SERIALIZATION_LIB, schema.getProperty(SERIALIZATION_LIB));
return stripped;
}
return schema;
}
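/**
* Creates a page source for a Parquet split, or returns {@link Optional#empty()} when the
* split's SerDe is not a known Parquet SerDe. ACID tables are rejected here.
*/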
@Override
public Optional<ReaderPageSource> createPageSource(
Configuration configuration,
ConnectorSession session,
Path path,
long start,
long length,
long estimatedFileSize,
Properties schema,
List<HiveColumnHandle> columns,
TupleDomain<HiveColumnHandle> effectivePredicate,
Optional<AcidInfo> acidInfo,
OptionalInt bucketNumber,
boolean originalFile,
AcidTransaction transaction)
{
if (!PARQUET_SERDE_CLASS_NAMES.contains(getDeserializerClassName(schema))) {
return Optional.empty();
}
checkArgument(acidInfo.isEmpty(), "Acid is not supported");
TrinoFileSystem fileSystem = fileSystemFactory.create(session);
TrinoInputFile inputFile = fileSystem.newInputFile(path.toString(), estimatedFileSize);
return Optional.of(createPageSource(
inputFile,
start,
length,
columns,
effectivePredicate,
isUseParquetColumnNames(session),
timeZone,
stats,
options.withIgnoreStatistics(isParquetIgnoreStatistics(session))
.withMaxReadBlockSize(getParquetMaxReadBlockSize(session))
.withMaxReadBlockRowCount(getParquetMaxReadBlockRowCount(session))
.withUseColumnIndex(isParquetUseColumnIndex(session))
.withBloomFilter(useParquetBloomFilter(session))
.withBatchColumnReaders(isParquetOptimizedReaderEnabled(session))
.withBatchNestedColumnReaders(isParquetOptimizedNestedReaderEnabled(session)),
Optional.empty(),
domainCompactionThreshold));
}
/**
* This method is available for other callers to use directly.
*/
public static ReaderPageSource createPageSource(
TrinoInputFile inputFile,
long start,
long length,
List<HiveColumnHandle> columns,
TupleDomain<HiveColumnHandle> effectivePredicate,
boolean useColumnNames,
DateTimeZone timeZone,
FileFormatDataSourceStats stats,
ParquetReaderOptions options,
Optional<ParquetWriteValidation> parquetWriteValidation,
int domainCompactionThreshold)
{
// Ignore predicates on partial columns for now.
effectivePredicate = effectivePredicate.filter((column, domain) -> column.isBaseColumn());
MessageType fileSchema;
MessageType requestedSchema;
MessageColumnIO messageColumn;
ParquetDataSource dataSource = null;
try {
dataSource = new TrinoParquetDataSource(inputFile, options, stats);
ParquetMetadata parquetMetadata = MetadataReader.readFooter(dataSource, parquetWriteValidation);
FileMetaData fileMetaData = parquetMetadata.getFileMetaData();
fileSchema = fileMetaData.getSchema();
Optional<MessageType> message = getParquetMessageType(columns, useColumnNames, fileSchema);
requestedSchema = message.orElse(new MessageType(fileSchema.getName(), ImmutableList.of()));
messageColumn = getColumnIO(fileSchema, requestedSchema);
Map<List<String>, ColumnDescriptor> descriptorsByPath = getDescriptors(fileSchema, requestedSchema);
TupleDomain<ColumnDescriptor> parquetTupleDomain = options.isIgnoreStatistics()
? TupleDomain.all()
: getParquetTupleDomain(descriptorsByPath, effectivePredicate, fileSchema, useColumnNames);
TupleDomainParquetPredicate parquetPredicate = buildPredicate(requestedSchema, parquetTupleDomain, descriptorsByPath, timeZone);
long nextStart = 0;
ImmutableList.Builder<BlockMetaData> blocks = ImmutableList.builder();
ImmutableList.Builder<Long> blockStarts = ImmutableList.builder();
ImmutableList.Builder<Optional<ColumnIndexStore>> columnIndexes = ImmutableList.builder();
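// Keep only row groups whose first data page falls within this split and that can match the predicate
// (row-group statistics, column indexes, and bloom filters are all consulted)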
for (BlockMetaData block : parquetMetadata.getBlocks()) {
long firstDataPage = block.getColumns().get(0).getFirstDataPageOffset();
Optional<ColumnIndexStore> columnIndex = getColumnIndexStore(dataSource, block, descriptorsByPath, parquetTupleDomain, options);
Optional<BloomFilterStore> bloomFilterStore = getBloomFilterStore(dataSource, block, parquetTupleDomain, options);
if (start <= firstDataPage && firstDataPage < start + length
&& predicateMatches(
parquetPredicate,
block,
dataSource,
descriptorsByPath,
parquetTupleDomain,
columnIndex,
bloomFilterStore,
timeZone,
domainCompactionThreshold)) {
blocks.add(block);
blockStarts.add(nextStart);
columnIndexes.add(columnIndex);
}
nextStart += block.getRowCount();
}
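// Read only the base columns; any dereference projections are returned alongside the page source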
Optional<ReaderColumns> readerProjections = projectBaseColumns(columns);
List<HiveColumnHandle> baseColumns = readerProjections.map(projection ->
projection.get().stream()
.map(HiveColumnHandle.class::cast)
.collect(toUnmodifiableList()))
.orElse(columns);
List<ParquetReaderColumn> parquetReaderColumns = createParquetReaderColumns(baseColumns, fileSchema, messageColumn, useColumnNames);
ParquetDataSourceId dataSourceId = dataSource.getId();
ConnectorPageSource parquetPageSource = new ParquetPageSource(
new ParquetReader(
Optional.ofNullable(fileMetaData.getCreatedBy()),
getParquetReaderFields(parquetReaderColumns),
blocks.build(),
blockStarts.build(),
dataSource,
timeZone,
newSimpleAggregatedMemoryContext(),
options,
exception -> handleException(dataSourceId, exception),
Optional.of(parquetPredicate),
columnIndexes.build(),
parquetWriteValidation),
parquetReaderColumns);
return new ReaderPageSource(parquetPageSource, readerProjections);
}
catch (Exception e) {
try {
if (dataSource != null) {
dataSource.close();
}
}
catch (IOException ignored) {
}
if (e instanceof TrinoException) {
throw (TrinoException) e;
}
if (e instanceof ParquetCorruptionException) {
throw new TrinoException(HIVE_BAD_DATA, e);
}
if (nullToEmpty(e.getMessage()).trim().equals("Filesystem closed") ||
e instanceof FileNotFoundException) {
throw new TrinoException(HIVE_CANNOT_OPEN_SPLIT, e);
}
String message = format("Error opening Hive split %s (offset=%s, length=%s): %s", inputFile.location(), start, length, e.getMessage());
if (e instanceof BlockMissingException) {
throw new TrinoException(HIVE_MISSING_DATA, message, e);
}
throw new TrinoException(HIVE_CANNOT_OPEN_SPLIT, message, e);
}
}
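/**
* Builds the requested Parquet schema: each referenced REGULAR column is resolved against the
* file schema (by name or by index) and the resulting types are unioned into a single message type.
*/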
public static Optional<MessageType> getParquetMessageType(List<HiveColumnHandle> columns, boolean useColumnNames, MessageType fileSchema)
{
Optional<MessageType> message = projectSufficientColumns(columns)
.map(projection -> projection.get().stream()
.map(HiveColumnHandle.class::cast)
.collect(toUnmodifiableList()))
.orElse(columns).stream()
.filter(column -> column.getColumnType() == REGULAR)
.map(column -> getColumnType(column, fileSchema, useColumnNames))
.filter(Optional::isPresent)
.map(Optional::get)
.map(type -> new MessageType(fileSchema.getName(), type))
.reduce(MessageType::union);
return message;
}
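/** Resolves a column's field within the given group, matching by name or by Hive column index. */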
public static Optional<org.apache.parquet.schema.Type> getParquetType(GroupType groupType, boolean useParquetColumnNames, HiveColumnHandle column)
{
if (useParquetColumnNames) {
return Optional.ofNullable(getParquetTypeByName(column.getBaseColumnName(), groupType));
}
if (column.getBaseHiveColumnIndex() < groupType.getFieldCount()) {
return Optional.of(groupType.getType(column.getBaseHiveColumnIndex()));
}
return Optional.empty();
}
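/**
* Resolves the Parquet type for a column. For projected columns the result is pruned to the
* chain of dereferenced subfields, so only those fields are read from the file.
*/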
public static Optional<org.apache.parquet.schema.Type> getColumnType(HiveColumnHandle column, MessageType messageType, boolean useParquetColumnNames)
{
Optional<org.apache.parquet.schema.Type> columnType = getParquetType(messageType, useParquetColumnNames, column);
if (columnType.isEmpty() || column.getHiveColumnProjectionInfo().isEmpty()) {
return columnType;
}
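// Rebuild a nested GroupType containing only the dereferenced subfield chain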
GroupType baseType = columnType.get().asGroupType();
ImmutableList.Builder<org.apache.parquet.schema.Type> typeBuilder = ImmutableList.builder();
org.apache.parquet.schema.Type parentType = baseType;
for (String name : column.getHiveColumnProjectionInfo().get().getDereferenceNames()) {
org.apache.parquet.schema.Type childType = getParquetTypeByName(name, parentType.asGroupType());
if (childType == null) {
return Optional.empty();
}
typeBuilder.add(childType);
parentType = childType;
}
List<org.apache.parquet.schema.Type> subfieldTypes = typeBuilder.build();
org.apache.parquet.schema.Type type = subfieldTypes.get(subfieldTypes.size() - 1);
for (int i = subfieldTypes.size() - 2; i >= 0; --i) {
GroupType groupType = subfieldTypes.get(i).asGroupType();
type = new GroupType(groupType.getRepetition(), groupType.getName(), ImmutableList.of(type));
}
return Optional.of(new GroupType(baseType.getRepetition(), baseType.getName(), ImmutableList.of(type)));
}
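/**
* Loads the column index (page-level statistics) for a row group when column-index filtering
* is enabled, the predicate is non-trivial, and the row group has column and offset indexes.
*/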
public static Optional<ColumnIndexStore> getColumnIndexStore(
ParquetDataSource dataSource,
BlockMetaData blockMetadata,
Map<List<String>, ColumnDescriptor> descriptorsByPath,
TupleDomain<ColumnDescriptor> parquetTupleDomain,
ParquetReaderOptions options)
{
if (!options.isUseColumnIndex() || parquetTupleDomain.isAll() || parquetTupleDomain.isNone()) {
return Optional.empty();
}
boolean hasColumnIndex = false;
for (ColumnChunkMetaData column : blockMetadata.getColumns()) {
if (column.getColumnIndexReference() != null && column.getOffsetIndexReference() != null) {
hasColumnIndex = true;
break;
}
}
if (!hasColumnIndex) {
return Optional.empty();
}
Set<ColumnPath> columnsReadPaths = new HashSet<>(descriptorsByPath.size());
for (List<String> path : descriptorsByPath.keySet()) {
columnsReadPaths.add(ColumnPath.get(path.toArray(new String[0])));
}
Map<ColumnDescriptor, Domain> parquetDomains = parquetTupleDomain.getDomains()
.orElseThrow(() -> new IllegalStateException("Predicate other than none should have domains"));
Set<ColumnPath> columnsFilteredPaths = parquetDomains.keySet().stream()
.map(column -> ColumnPath.get(column.getPath()))
.collect(toImmutableSet());
return Optional.of(new TrinoColumnIndexStore(dataSource, blockMetadata, columnsReadPaths, columnsFilteredPaths));
}
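/**
* Loads bloom filters for a row group when bloom-filter filtering is enabled, the predicate is
* non-trivial, and at least one column chunk carries a bloom filter.
*/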
public static Optional<BloomFilterStore> getBloomFilterStore(
ParquetDataSource dataSource,
BlockMetaData blockMetadata,
TupleDomain<ColumnDescriptor> parquetTupleDomain,
ParquetReaderOptions options)
{
if (!options.useBloomFilter() || parquetTupleDomain.isAll() || parquetTupleDomain.isNone()) {
return Optional.empty();
}
boolean hasBloomFilter = blockMetadata.getColumns().stream().anyMatch(BloomFilterStore::hasBloomFilter);
if (!hasBloomFilter) {
return Optional.empty();
}
Map<ColumnDescriptor, Domain> parquetDomains = parquetTupleDomain.getDomains()
.orElseThrow(() -> new IllegalStateException("Predicate other than none should have domains"));
Set<ColumnPath> columnsFilteredPaths = parquetDomains.keySet().stream()
.map(column -> ColumnPath.get(column.getPath()))
.collect(toImmutableSet());
return Optional.of(new BloomFilterStore(dataSource, blockMetadata, columnsFilteredPaths));
}
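/**
* Translates the effective predicate on Hive columns into a predicate on Parquet column
* descriptors, skipping complex types since Parquet stores statistics only for primitives.
*/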
public static TupleDomain<ColumnDescriptor> getParquetTupleDomain(
Map<List<String>, ColumnDescriptor> descriptorsByPath,
TupleDomain<HiveColumnHandle> effectivePredicate,
MessageType fileSchema,
boolean useColumnNames)
{
if (effectivePredicate.isNone()) {
return TupleDomain.none();
}
ImmutableMap.Builder<ColumnDescriptor, Domain> predicate = ImmutableMap.builder();
for (Entry<HiveColumnHandle, Domain> entry : effectivePredicate.getDomains().get().entrySet()) {
HiveColumnHandle columnHandle = entry.getKey();
// skip looking up predicates for complex types as Parquet only stores stats for primitives
if (columnHandle.getHiveType().getCategory() != PRIMITIVE || columnHandle.getColumnType() != REGULAR) {
continue;
}
ColumnDescriptor descriptor;
if (useColumnNames) {
descriptor = descriptorsByPath.get(ImmutableList.of(columnHandle.getName()));
}
else {
org.apache.parquet.schema.Type parquetField = getParquetType(columnHandle, fileSchema, false);
if (parquetField == null || !parquetField.isPrimitive()) {
// Parquet file has fewer columns than the partition,
// or the field is a complex type
continue;
}
descriptor = descriptorsByPath.get(ImmutableList.of(parquetField.getName()));
}
if (descriptor != null) {
predicate.put(descriptor, entry.getValue());
}
}
return TupleDomain.withColumnDomains(predicate.buildOrThrow());
}
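/** Returns the Parquet field for a base column, matched by name or by index, or {@code null} if the file does not contain it. */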
public static org.apache.parquet.schema.Type getParquetType(HiveColumnHandle column, MessageType messageType, boolean useParquetColumnNames)
{
if (useParquetColumnNames) {
return getParquetTypeByName(column.getBaseColumnName(), messageType);
}
if (column.getBaseHiveColumnIndex() < messageType.getFieldCount()) {
return messageType.getType(column.getBaseHiveColumnIndex());
}
return null;
}
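/**
* Maps each base column to a {@link ParquetReaderColumn}, resolving its field in the file schema;
* the synthetic row-index column gets no field and is populated by the reader itself.
*/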
public static List<ParquetReaderColumn> createParquetReaderColumns(List<HiveColumnHandle> baseColumns, MessageType fileSchema, MessageColumnIO messageColumn, boolean useColumnNames)
{
for (HiveColumnHandle column : baseColumns) {
checkArgument(column == PARQUET_ROW_INDEX_COLUMN || column.getColumnType() == REGULAR, "column type must be REGULAR: %s", column);
}
return baseColumns.stream()
.map(column -> {
boolean isRowIndexColumn = column == PARQUET_ROW_INDEX_COLUMN;
return new ParquetReaderColumn(
column.getBaseType(),
isRowIndexColumn ? Optional.empty() : Optional.ofNullable(getParquetType(column, fileSchema, useColumnNames))
.flatMap(field -> {
String columnName = useColumnNames ? column.getBaseColumnName() : fileSchema.getFields().get(column.getBaseHiveColumnIndex()).getName();
return constructField(column.getBaseType(), lookupColumnByName(messageColumn, columnName));
}),
isRowIndexColumn);
})
.collect(toImmutableList());
}
}