/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.spark;

import java.util.Map;
import java.util.Set;
import org.apache.iceberg.Table;
import org.apache.iceberg.TableProperties;
import org.apache.iceberg.hadoop.HadoopInputFile;
import org.apache.iceberg.io.InputFile;
import org.apache.iceberg.relocated.com.google.common.collect.ImmutableSet;
import org.apache.iceberg.util.PropertyUtil;
import org.apache.spark.sql.SparkSession;

/**
 * A class for common Iceberg configs for Spark reads.
 * <p>
 * If a config is set at multiple levels, the following order of precedence is used (top to bottom):
 * <ol>
 *   <li>Read options</li>
 *   <li>Session configuration</li>
 *   <li>Table metadata</li>
 * </ol>
 * The most specific value is set in read options and takes precedence over all other configs.
 * If no read option is provided, this class checks the session configuration for any overrides.
 * If no applicable value is found in the session configuration, this class uses the table metadata.
 * <p>
 * Note this class is NOT meant to be serialized and sent to executors.
 */
public class SparkReadConf {
  private static final Set<String> LOCALITY_WHITELIST_FS = ImmutableSet.of("hdfs");

  private final Table table;
  private final Map<String, String> readOptions;
  private final SparkConfParser confParser;

  public SparkReadConf(SparkSession spark, Table table, Map<String, String> readOptions) {
    this.table = table;
    this.readOptions = readOptions;
    this.confParser = new SparkConfParser(spark, table, readOptions);
  }
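
  // Usage sketch (illustrative; the ImmutableMap helper and the split size value
  // are assumptions for the example, not defaults shipped by this class):
  //
  //   Map<String, String> options = ImmutableMap.of(SparkReadOptions.SPLIT_SIZE, "268435456");
  //   SparkReadConf readConf = new SparkReadConf(spark, table, options);
  //   long size = readConf.splitSize(); // 268435456: the read option takes precedence
  //                                     // over TableProperties.SPLIT_SIZE and the default
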
public boolean localityEnabled() {
InputFile file = table.io().newInputFile(table.location());
if (file instanceof HadoopInputFile) {
String scheme = ((HadoopInputFile) file).getFileSystem().getScheme();
boolean defaultValue = LOCALITY_WHITELIST_FS.contains(scheme);
return PropertyUtil.propertyAsBoolean(
readOptions,
SparkReadOptions.LOCALITY,
defaultValue);
}
return false;
}
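
  // Locality sketch (illustrative table identifier): for an HDFS-backed table the
  // default is true, since "hdfs" is in LOCALITY_WHITELIST_FS; any other scheme
  // defaults to false. A read option can disable it explicitly:
  //
  //   spark.read().format("iceberg")
  //       .option(SparkReadOptions.LOCALITY, "false")
  //       .load("db.table");
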
public Long snapshotId() {
return confParser.longConf()
.option(SparkReadOptions.SNAPSHOT_ID)
.parseOptional();
  }

public Long asOfTimestamp() {
return confParser.longConf()
.option(SparkReadOptions.AS_OF_TIMESTAMP)
.parseOptional();
  }

public Long startSnapshotId() {
return confParser.longConf()
.option(SparkReadOptions.START_SNAPSHOT_ID)
.parseOptional();
  }

public Long endSnapshotId() {
return confParser.longConf()
.option(SparkReadOptions.END_SNAPSHOT_ID)
.parseOptional();
}
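
  // Time-travel and incremental-read sketch (illustrative ids and table name;
  // spark.read() is Spark's standard DataFrameReader API):
  //
  //   // Read the table as of a specific snapshot:
  //   spark.read().format("iceberg")
  //       .option(SparkReadOptions.SNAPSHOT_ID, "10963874102873")
  //       .load("db.table");
  //
  //   // Read only data appended between two snapshots:
  //   spark.read().format("iceberg")
  //       .option(SparkReadOptions.START_SNAPSHOT_ID, "10963874102873")
  //       .option(SparkReadOptions.END_SNAPSHOT_ID, "10963874102994")
  //       .load("db.table");
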
public boolean streamingSkipDeleteSnapshots() {
return confParser.booleanConf()
.option(SparkReadOptions.STREAMING_SKIP_DELETE_SNAPSHOTS)
.defaultValue(SparkReadOptions.STREAMING_SKIP_DELETE_SNAPSHOTS_DEFAULT)
.parse();
  }

public boolean parquetVectorizationEnabled() {
return confParser.booleanConf()
.option(SparkReadOptions.VECTORIZATION_ENABLED)
.sessionConf(SparkSQLProperties.VECTORIZATION_ENABLED)
.tableProperty(TableProperties.PARQUET_VECTORIZATION_ENABLED)
.defaultValue(TableProperties.PARQUET_VECTORIZATION_ENABLED_DEFAULT)
.parse();
  }

public int parquetBatchSize() {
return confParser.intConf()
.option(SparkReadOptions.VECTORIZATION_BATCH_SIZE)
.tableProperty(TableProperties.PARQUET_BATCH_SIZE)
.defaultValue(TableProperties.PARQUET_BATCH_SIZE_DEFAULT)
.parse();
  }

public boolean orcVectorizationEnabled() {
return confParser.booleanConf()
.option(SparkReadOptions.VECTORIZATION_ENABLED)
.sessionConf(SparkSQLProperties.VECTORIZATION_ENABLED)
.tableProperty(TableProperties.ORC_VECTORIZATION_ENABLED)
.defaultValue(TableProperties.ORC_VECTORIZATION_ENABLED_DEFAULT)
.parse();
  }

public int orcBatchSize() {
return confParser.intConf()
.option(SparkReadOptions.VECTORIZATION_BATCH_SIZE)
.tableProperty(TableProperties.ORC_BATCH_SIZE)
.defaultValue(TableProperties.ORC_BATCH_SIZE_DEFAULT)
.parse();
}
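
  // Session-level override sketch (illustrative): with no read option present,
  // a SQL conf set on the session decides vectorization before any table property
  // is consulted:
  //
  //   spark.conf().set(SparkSQLProperties.VECTORIZATION_ENABLED, "true");
  //   // parquetVectorizationEnabled() and orcVectorizationEnabled() now return true
  //   // even if the corresponding table property is false.
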
public long splitSize() {
return confParser.longConf()
.option(SparkReadOptions.SPLIT_SIZE)
.tableProperty(TableProperties.SPLIT_SIZE)
.defaultValue(TableProperties.SPLIT_SIZE_DEFAULT)
.parse();
  }

public int splitLookback() {
return confParser.intConf()
.option(SparkReadOptions.LOOKBACK)
.tableProperty(TableProperties.SPLIT_LOOKBACK)
.defaultValue(TableProperties.SPLIT_LOOKBACK_DEFAULT)
.parse();
  }

public long splitOpenFileCost() {
return confParser.longConf()
.option(SparkReadOptions.FILE_OPEN_COST)
.tableProperty(TableProperties.SPLIT_OPEN_FILE_COST)
.defaultValue(TableProperties.SPLIT_OPEN_FILE_COST_DEFAULT)
.parse();
}
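
  // Split planning sketch (illustrative numbers): with splitSize() = 128 MB and
  // splitOpenFileCost() = 4 MB, a 1 MB data file is weighted as 4 MB when bin-packing
  // scan tasks, so the cost of opening many tiny files is counted against combining
  // them, while splitLookback() bounds how many bins are considered for each file.
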
  /**
   * Enables reading a timestamp without time zone as a timestamp with time zone.
   * <p>
   * Generally, this is not safe because a timestamp without time zone is supposed to represent
   * wall-clock time, i.e. no matter the reader/writer time zone, 3PM should always be read as 3PM,
   * whereas a timestamp with time zone has instant semantics, i.e. the timestamp
   * is adjusted so that the corresponding time in the reader time zone is displayed.
   * <p>
   * When set to false (the default), an exception is thrown while reading a timestamp without time zone.
   *
   * @return boolean indicating if reading timestamps without time zone is allowed
   */
public boolean handleTimestampWithoutZone() {
return confParser.booleanConf()
.option(SparkReadOptions.HANDLE_TIMESTAMP_WITHOUT_TIMEZONE)
.sessionConf(SparkSQLProperties.HANDLE_TIMESTAMP_WITHOUT_TIMEZONE)
.defaultValue(SparkSQLProperties.HANDLE_TIMESTAMP_WITHOUT_TIMEZONE_DEFAULT)
.parse();
}
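
  // Sketch: allowing timestamp-without-zone reads for a single query
  // (illustrative table identifier):
  //
  //   spark.read().format("iceberg")
  //       .option(SparkReadOptions.HANDLE_TIMESTAMP_WITHOUT_TIMEZONE, "true")
  //       .load("db.table");
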
public Long streamFromTimestamp() {
return confParser.longConf()
.option(SparkReadOptions.STREAM_FROM_TIMESTAMP)
.defaultValue(Long.MIN_VALUE)
.parse();
}
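
  // Streaming sketch (illustrative timestamp, in milliseconds since epoch;
  // spark.readStream() is Spark's standard structured streaming API):
  //
  //   spark.readStream().format("iceberg")
  //       .option(SparkReadOptions.STREAM_FROM_TIMESTAMP, "1650000000000")
  //       .option(SparkReadOptions.STREAMING_SKIP_DELETE_SNAPSHOTS, "true")
  //       .load("db.table");
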
}