org.apache.iceberg.data.parquet.BaseParquetReaders
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.data.parquet;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.time.Instant;
import java.time.LocalDate;
import java.time.LocalDateTime;
import java.time.LocalTime;
import java.time.OffsetDateTime;
import java.time.ZoneOffset;
import java.time.temporal.ChronoUnit;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import org.apache.iceberg.MetadataColumns;
import org.apache.iceberg.Schema;
import org.apache.iceberg.parquet.ParquetSchemaUtil;
import org.apache.iceberg.parquet.ParquetValueReader;
import org.apache.iceberg.parquet.ParquetValueReaders;
import org.apache.iceberg.parquet.TypeWithSchemaVisitor;
import org.apache.iceberg.relocated.com.google.common.collect.ImmutableList;
import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap;
import org.apache.iceberg.relocated.com.google.common.collect.Lists;
import org.apache.iceberg.relocated.com.google.common.collect.Maps;
import org.apache.iceberg.types.Types;
import org.apache.parquet.column.ColumnDescriptor;
import org.apache.parquet.schema.DecimalMetadata;
import org.apache.parquet.schema.GroupType;
import org.apache.parquet.schema.MessageType;
import org.apache.parquet.schema.PrimitiveType;
import org.apache.parquet.schema.Type;
public abstract class BaseParquetReaders<T> {
protected BaseParquetReaders() {
}
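  /**
   * Returns a reader for the expected schema with no constant column values.
   */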
  protected ParquetValueReader<T> createReader(Schema expectedSchema,
                                               MessageType fileSchema) {
return createReader(expectedSchema, fileSchema, ImmutableMap.of());
}
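  /**
   * Returns a reader for the expected schema. The idToConstant map supplies values for field IDs
   * that are constant for the file (for example, identity partition values). Fields are matched
   * by ID when the file schema has field IDs; otherwise the fallback builder matches by position.
   */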
@SuppressWarnings("unchecked")
  protected ParquetValueReader<T> createReader(Schema expectedSchema,
                                               MessageType fileSchema,
                                               Map<Integer, ?> idToConstant) {
if (ParquetSchemaUtil.hasIds(fileSchema)) {
      return (ParquetValueReader<T>)
TypeWithSchemaVisitor.visit(expectedSchema.asStruct(), fileSchema,
new ReadBuilder(fileSchema, idToConstant));
} else {
      return (ParquetValueReader<T>)
TypeWithSchemaVisitor.visit(expectedSchema.asStruct(), fileSchema,
new FallbackReadBuilder(fileSchema, idToConstant));
}
}
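  /**
   * Implemented by subclasses to build a struct reader for the concrete in-memory row type.
   */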
  protected abstract ParquetValueReader<T> createStructReader(List<Type> types,
                                                              List<ParquetValueReader<?>> fieldReaders,
                                                              Types.StructType structType);
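  // used when the file schema has no field IDs: fields are matched to the expected schema
  // by position instead of by ID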
private class FallbackReadBuilder extends ReadBuilder {
    private FallbackReadBuilder(MessageType type, Map<Integer, ?> idToConstant) {
super(type, idToConstant);
}
@Override
    public ParquetValueReader<?> message(Types.StructType expected, MessageType message,
                                         List<ParquetValueReader<?>> fieldReaders) {
// the top level matches by ID, but the remaining IDs are missing
return super.struct(expected, message, fieldReaders);
}
@Override
    public ParquetValueReader<?> struct(Types.StructType expected, GroupType struct,
                                        List<ParquetValueReader<?>> fieldReaders) {
      // the expected struct is ignored because nested fields are never found when the
      // file schema does not carry field IDs
      List<ParquetValueReader<?>> newFields = Lists.newArrayListWithExpectedSize(
          fieldReaders.size());
      List<Type> types = Lists.newArrayListWithExpectedSize(fieldReaders.size());
      List<Type> fields = struct.getFields();
for (int i = 0; i < fields.size(); i += 1) {
        ParquetValueReader<?> fieldReader = fieldReaders.get(i);
if (fieldReader != null) {
Type fieldType = fields.get(i);
int fieldD = type().getMaxDefinitionLevel(path(fieldType.getName())) - 1;
newFields.add(ParquetValueReaders.option(fieldType, fieldD, fieldReader));
types.add(fieldType);
}
}
return createStructReader(types, newFields, expected);
}
}
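  // builds readers by matching file columns to expected fields by Iceberg field ID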
  private class ReadBuilder extends TypeWithSchemaVisitor<ParquetValueReader<?>> {
private final MessageType type;
    private final Map<Integer, ?> idToConstant;
    private ReadBuilder(MessageType type, Map<Integer, ?> idToConstant) {
this.type = type;
this.idToConstant = idToConstant;
}
@Override
    public ParquetValueReader<?> message(Types.StructType expected, MessageType message,
                                         List<ParquetValueReader<?>> fieldReaders) {
return struct(expected, message.asGroupType(), fieldReaders);
}
@Override
    public ParquetValueReader<?> struct(Types.StructType expected, GroupType struct,
                                        List<ParquetValueReader<?>> fieldReaders) {
      // match the expected struct's order
      Map<Integer, ParquetValueReader<?>> readersById = Maps.newHashMap();
      Map<Integer, Type> typesById = Maps.newHashMap();
      List<Type> fields = struct.getFields();
for (int i = 0; i < fields.size(); i += 1) {
        ParquetValueReader<?> fieldReader = fieldReaders.get(i);
if (fieldReader != null) {
Type fieldType = fields.get(i);
int fieldD = type.getMaxDefinitionLevel(path(fieldType.getName())) - 1;
int id = fieldType.getId().intValue();
readersById.put(id, ParquetValueReaders.option(fieldType, fieldD, fieldReader));
typesById.put(id, fieldType);
}
}
      List<Types.NestedField> expectedFields = expected != null ?
          expected.fields() : ImmutableList.of();
      List<ParquetValueReader<?>> reorderedFields = Lists.newArrayListWithExpectedSize(
          expectedFields.size());
      List<Type> types = Lists.newArrayListWithExpectedSize(expectedFields.size());
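      // fill in readers in the expected field order, substituting constants, the row
      // position, and the is-deleted flag for metadata columns not read from the file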
for (Types.NestedField field : expectedFields) {
int id = field.fieldId();
if (idToConstant.containsKey(id)) {
// containsKey is used because the constant may be null
reorderedFields.add(ParquetValueReaders.constant(idToConstant.get(id)));
types.add(null);
} else if (id == MetadataColumns.ROW_POSITION.fieldId()) {
reorderedFields.add(ParquetValueReaders.position());
types.add(null);
} else if (id == MetadataColumns.IS_DELETED.fieldId()) {
reorderedFields.add(ParquetValueReaders.constant(false));
types.add(null);
} else {
        ParquetValueReader<?> reader = readersById.get(id);
if (reader != null) {
reorderedFields.add(reader);
types.add(typesById.get(id));
} else {
reorderedFields.add(ParquetValueReaders.nulls());
types.add(null);
}
}
}
return createStructReader(types, reorderedFields, expected);
}
@Override
    public ParquetValueReader<?> list(Types.ListType expectedList, GroupType array,
                                      ParquetValueReader<?> elementReader) {
if (expectedList == null) {
return null;
}
String[] repeatedPath = currentPath();
int repeatedD = type.getMaxDefinitionLevel(repeatedPath) - 1;
int repeatedR = type.getMaxRepetitionLevel(repeatedPath) - 1;
Type elementType = ParquetSchemaUtil.determineListElementType(array);
int elementD = type.getMaxDefinitionLevel(path(elementType.getName())) - 1;
return new ParquetValueReaders.ListReader<>(repeatedD, repeatedR,
ParquetValueReaders.option(elementType, elementD, elementReader));
}
@Override
    public ParquetValueReader<?> map(Types.MapType expectedMap, GroupType map,
                                     ParquetValueReader<?> keyReader,
                                     ParquetValueReader<?> valueReader) {
if (expectedMap == null) {
return null;
}
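      // the map's single field is the repeated key_value group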
GroupType repeatedKeyValue = map.getFields().get(0).asGroupType();
String[] repeatedPath = currentPath();
int repeatedD = type.getMaxDefinitionLevel(repeatedPath) - 1;
int repeatedR = type.getMaxRepetitionLevel(repeatedPath) - 1;
Type keyType = repeatedKeyValue.getType(0);
int keyD = type.getMaxDefinitionLevel(path(keyType.getName())) - 1;
Type valueType = repeatedKeyValue.getType(1);
int valueD = type.getMaxDefinitionLevel(path(valueType.getName())) - 1;
return new ParquetValueReaders.MapReader<>(repeatedD, repeatedR,
ParquetValueReaders.option(keyType, keyD, keyReader),
ParquetValueReaders.option(valueType, valueD, valueReader));
}
@Override
@SuppressWarnings("checkstyle:CyclomaticComplexity")
    public ParquetValueReader<?> primitive(org.apache.iceberg.types.Type.PrimitiveType expected,
                                           PrimitiveType primitive) {
if (expected == null) {
return null;
}
ColumnDescriptor desc = type.getColumnDescription(currentPath());
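      // a logical type annotation (OriginalType), when present, determines the reader;
      // otherwise fall back on the physical Parquet type below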
if (primitive.getOriginalType() != null) {
switch (primitive.getOriginalType()) {
case ENUM:
case JSON:
case UTF8:
return new ParquetValueReaders.StringReader(desc);
case INT_8:
case INT_16:
case INT_32:
if (expected.typeId() == org.apache.iceberg.types.Type.TypeID.LONG) {
return new ParquetValueReaders.IntAsLongReader(desc);
} else {
return new ParquetValueReaders.UnboxedReader<>(desc);
}
case INT_64:
return new ParquetValueReaders.UnboxedReader<>(desc);
case DATE:
return new DateReader(desc);
case TIMESTAMP_MICROS:
Types.TimestampType tsMicrosType = (Types.TimestampType) expected;
if (tsMicrosType.shouldAdjustToUTC()) {
return new TimestamptzReader(desc);
} else {
return new TimestampReader(desc);
}
case TIMESTAMP_MILLIS:
Types.TimestampType tsMillisType = (Types.TimestampType) expected;
if (tsMillisType.shouldAdjustToUTC()) {
return new TimestamptzMillisReader(desc);
} else {
return new TimestampMillisReader(desc);
}
case TIME_MICROS:
return new TimeReader(desc);
case TIME_MILLIS:
return new TimeMillisReader(desc);
case DECIMAL:
DecimalMetadata decimal = primitive.getDecimalMetadata();
switch (primitive.getPrimitiveTypeName()) {
case BINARY:
case FIXED_LEN_BYTE_ARRAY:
return new ParquetValueReaders.BinaryAsDecimalReader(desc, decimal.getScale());
case INT64:
return new ParquetValueReaders.LongAsDecimalReader(desc, decimal.getScale());
case INT32:
return new ParquetValueReaders.IntegerAsDecimalReader(desc, decimal.getScale());
default:
throw new UnsupportedOperationException(
"Unsupported base type for decimal: " + primitive.getPrimitiveTypeName());
}
case BSON:
return new ParquetValueReaders.BytesReader(desc);
default:
throw new UnsupportedOperationException(
"Unsupported logical type: " + primitive.getOriginalType());
}
}
switch (primitive.getPrimitiveTypeName()) {
case FIXED_LEN_BYTE_ARRAY:
return new FixedReader(desc);
case BINARY:
if (expected != null && expected.typeId() == org.apache.iceberg.types.Type.TypeID.STRING) {
return new ParquetValueReaders.StringReader(desc);
} else {
return new ParquetValueReaders.BytesReader(desc);
}
case INT32:
if (expected != null && expected.typeId() == org.apache.iceberg.types.Type.TypeID.LONG) {
return new ParquetValueReaders.IntAsLongReader(desc);
} else {
return new ParquetValueReaders.UnboxedReader<>(desc);
}
case FLOAT:
if (expected != null && expected.typeId() == org.apache.iceberg.types.Type.TypeID.DOUBLE) {
return new ParquetValueReaders.FloatAsDoubleReader(desc);
} else {
return new ParquetValueReaders.UnboxedReader<>(desc);
}
case BOOLEAN:
case INT64:
case DOUBLE:
return new ParquetValueReaders.UnboxedReader<>(desc);
case INT96:
// Impala & Spark used to write timestamps as INT96 without a logical type. For backwards
// compatibility we try to read INT96 as timestamps.
return new TimestampInt96Reader(desc);
default:
throw new UnsupportedOperationException("Unsupported type: " + primitive);
}
}
MessageType type() {
return type;
}
}
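  // the readers below convert Parquet's epoch-relative numeric values into java.time objects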
private static final OffsetDateTime EPOCH = Instant.ofEpochSecond(0).atOffset(ZoneOffset.UTC);
private static final LocalDate EPOCH_DAY = EPOCH.toLocalDate();
  private static class DateReader extends ParquetValueReaders.PrimitiveReader<LocalDate> {
private DateReader(ColumnDescriptor desc) {
super(desc);
}
@Override
public LocalDate read(LocalDate reuse) {
return EPOCH_DAY.plusDays(column.nextInteger());
}
}
  private static class TimestampReader extends ParquetValueReaders.PrimitiveReader<LocalDateTime> {
private TimestampReader(ColumnDescriptor desc) {
super(desc);
}
@Override
public LocalDateTime read(LocalDateTime reuse) {
return EPOCH.plus(column.nextLong(), ChronoUnit.MICROS).toLocalDateTime();
}
}
  private static class TimestampMillisReader extends ParquetValueReaders.PrimitiveReader<LocalDateTime> {
private TimestampMillisReader(ColumnDescriptor desc) {
super(desc);
}
@Override
public LocalDateTime read(LocalDateTime reuse) {
return EPOCH.plus(column.nextLong() * 1000, ChronoUnit.MICROS).toLocalDateTime();
}
}
  private static class TimestampInt96Reader extends ParquetValueReaders.PrimitiveReader<OffsetDateTime> {
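    // Julian day number of the Unix epoch, 1970-01-01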
private static final long UNIX_EPOCH_JULIAN = 2_440_588L;
private TimestampInt96Reader(ColumnDescriptor desc) {
super(desc);
}
@Override
public OffsetDateTime read(OffsetDateTime reuse) {
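      // INT96 layout: 8 bytes of nanos within the day followed by a 4-byte Julian day,
      // little-endian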
final ByteBuffer byteBuffer = column.nextBinary().toByteBuffer().order(ByteOrder.LITTLE_ENDIAN);
final long timeOfDayNanos = byteBuffer.getLong();
final int julianDay = byteBuffer.getInt();
return Instant
.ofEpochMilli(TimeUnit.DAYS.toMillis(julianDay - UNIX_EPOCH_JULIAN))
.plusNanos(timeOfDayNanos).atOffset(ZoneOffset.UTC);
}
}
  private static class TimestamptzReader extends ParquetValueReaders.PrimitiveReader<OffsetDateTime> {
private TimestamptzReader(ColumnDescriptor desc) {
super(desc);
}
@Override
public OffsetDateTime read(OffsetDateTime reuse) {
return EPOCH.plus(column.nextLong(), ChronoUnit.MICROS);
}
}
  private static class TimestamptzMillisReader extends ParquetValueReaders.PrimitiveReader<OffsetDateTime> {
private TimestamptzMillisReader(ColumnDescriptor desc) {
super(desc);
}
@Override
public OffsetDateTime read(OffsetDateTime reuse) {
return EPOCH.plus(column.nextLong() * 1000, ChronoUnit.MICROS);
}
}
  private static class TimeMillisReader extends ParquetValueReaders.PrimitiveReader<LocalTime> {
private TimeMillisReader(ColumnDescriptor desc) {
super(desc);
}
@Override
public LocalTime read(LocalTime reuse) {
return LocalTime.ofNanoOfDay(column.nextLong() * 1000000L);
}
}
  private static class TimeReader extends ParquetValueReaders.PrimitiveReader<LocalTime> {
private TimeReader(ColumnDescriptor desc) {
super(desc);
}
@Override
public LocalTime read(LocalTime reuse) {
return LocalTime.ofNanoOfDay(column.nextLong() * 1000L);
}
}
  private static class FixedReader extends ParquetValueReaders.PrimitiveReader<byte[]> {
private FixedReader(ColumnDescriptor desc) {
super(desc);
}
@Override
public byte[] read(byte[] reuse) {
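      // copy into the caller's buffer when one is provided, avoiding a new allocation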
if (reuse != null) {
column.nextBinary().toByteBuffer().duplicate().get(reuse);
return reuse;
} else {
return column.nextBinary().getBytes();
}
}
}
}