org.apache.iceberg.BaseIncrementalChangelogScan (iceberg-core)
A table format for huge analytic datasets
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg;
import java.util.ArrayDeque;
import java.util.Collection;
import java.util.Deque;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
import org.apache.iceberg.ManifestGroup.CreateTasksFunction;
import org.apache.iceberg.ManifestGroup.TaskContext;
import org.apache.iceberg.io.CloseableIterable;
import org.apache.iceberg.relocated.com.google.common.collect.FluentIterable;
import org.apache.iceberg.relocated.com.google.common.collect.ImmutableList;
import org.apache.iceberg.relocated.com.google.common.collect.Maps;
import org.apache.iceberg.util.SnapshotUtil;
import org.apache.iceberg.util.TableScanUtil;
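/**
 * An incremental scan that plans {@link ChangelogScanTask changelog tasks} for the snapshots
 * between a from-snapshot (exclusive) and a to-snapshot (inclusive).
 */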
class BaseIncrementalChangelogScan
    extends BaseIncrementalScan<
        IncrementalChangelogScan, ChangelogScanTask, ScanTaskGroup<ChangelogScanTask>>
implements IncrementalChangelogScan {
BaseIncrementalChangelogScan(Table table) {
this(table, table.schema(), TableScanContext.empty());
}
private BaseIncrementalChangelogScan(Table table, Schema schema, TableScanContext context) {
super(table, schema, context);
}
@Override
protected IncrementalChangelogScan newRefinedScan(
Table newTable, Schema newSchema, TableScanContext newContext) {
return new BaseIncrementalChangelogScan(newTable, newSchema, newContext);
}
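  // Plans changelog tasks for the snapshot range: collects the data manifests written
  // by those snapshots and turns their ADDED/DELETED entries into scan tasks.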
@Override
  protected CloseableIterable<ChangelogScanTask> doPlanFiles(
      Long fromSnapshotIdExclusive, long toSnapshotIdInclusive) {
    Deque<Snapshot> changelogSnapshots =
orderedChangelogSnapshots(fromSnapshotIdExclusive, toSnapshotIdInclusive);
if (changelogSnapshots.isEmpty()) {
return CloseableIterable.empty();
}
    Set<Long> changelogSnapshotIds = toSnapshotIds(changelogSnapshots);
    Set<ManifestFile> newDataManifests =
FluentIterable.from(changelogSnapshots)
.transformAndConcat(snapshot -> snapshot.dataManifests(table().io()))
.filter(manifest -> changelogSnapshotIds.contains(manifest.snapshotId()))
.toSet();
ManifestGroup manifestGroup =
new ManifestGroup(table().io(), newDataManifests, ImmutableList.of())
.specsById(table().specs())
.caseSensitive(isCaseSensitive())
.select(scanColumns())
.filterData(filter())
.filterManifestEntries(entry -> changelogSnapshotIds.contains(entry.snapshotId()))
.ignoreExisting()
.columnsToKeepStats(columnsToKeepStats());
if (shouldIgnoreResiduals()) {
manifestGroup = manifestGroup.ignoreResiduals();
}
if (newDataManifests.size() > 1 && shouldPlanWithExecutor()) {
manifestGroup = manifestGroup.planWith(planExecutor());
}
return manifestGroup.plan(new CreateDataFileChangeTasks(changelogSnapshots));
}
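  // groups the planned changelog tasks into task groups using the scan's split settings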
@Override
  public CloseableIterable<ScanTaskGroup<ChangelogScanTask>> planTasks() {
return TableScanUtil.planTaskGroups(
planFiles(), targetSplitSize(), splitLookback(), splitOpenFileCost());
}
// builds a collection of changelog snapshots (oldest to newest)
// the order of the snapshots is important as it is used to determine change ordinals
  private Deque<Snapshot> orderedChangelogSnapshots(Long fromIdExcl, long toIdIncl) {
    Deque<Snapshot> changelogSnapshots = new ArrayDeque<>();
for (Snapshot snapshot : SnapshotUtil.ancestorsBetween(table(), toIdIncl, fromIdExcl)) {
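      // skip REPLACE commits (e.g. compaction), which rewrite files without changing data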
if (!snapshot.operation().equals(DataOperations.REPLACE)) {
if (!snapshot.deleteManifests(table().io()).isEmpty()) {
throw new UnsupportedOperationException(
"Delete files are currently not supported in changelog scans");
}
changelogSnapshots.addFirst(snapshot);
}
}
return changelogSnapshots;
}
  private Set<Long> toSnapshotIds(Collection<Snapshot> snapshots) {
return snapshots.stream().map(Snapshot::snapshotId).collect(Collectors.toSet());
}
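  // assigns each snapshot a zero-based change ordinal in commit order (oldest first)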
  private static Map<Long, Integer> computeSnapshotOrdinals(Deque<Snapshot> snapshots) {
    Map<Long, Integer> snapshotOrdinals = Maps.newHashMap();
int ordinal = 0;
for (Snapshot snapshot : snapshots) {
snapshotOrdinals.put(snapshot.snapshotId(), ordinal++);
}
return snapshotOrdinals;
}
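  // Converts manifest entries into changelog tasks: ADDED entries become added-rows tasks
  // and DELETED entries become deleted-data-file tasks, ordered by the committing snapshot.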
  private static class CreateDataFileChangeTasks implements CreateTasksFunction<ChangelogScanTask> {
private static final DeleteFile[] NO_DELETES = new DeleteFile[0];
    private final Map<Long, Integer> snapshotOrdinals;
    CreateDataFileChangeTasks(Deque<Snapshot> snapshots) {
this.snapshotOrdinals = computeSnapshotOrdinals(snapshots);
}
@Override
    public CloseableIterable<ChangelogScanTask> apply(
        CloseableIterable<ManifestEntry<DataFile>> entries, TaskContext context) {
return CloseableIterable.transform(
entries,
entry -> {
long commitSnapshotId = entry.snapshotId();
int changeOrdinal = snapshotOrdinals.get(commitSnapshotId);
DataFile dataFile = entry.file().copy(context.shouldKeepStats());
switch (entry.status()) {
case ADDED:
return new BaseAddedRowsScanTask(
changeOrdinal,
commitSnapshotId,
dataFile,
NO_DELETES,
context.schemaAsString(),
context.specAsString(),
context.residuals());
case DELETED:
return new BaseDeletedDataFileScanTask(
changeOrdinal,
commitSnapshotId,
dataFile,
NO_DELETES,
context.schemaAsString(),
context.specAsString(),
context.residuals());
default:
throw new IllegalArgumentException("Unexpected entry status: " + entry.status());
}
});
}
}
}
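Below is a minimal usage sketch of the class through the public scan API. It assumes a loaded Table whose implementation supports changelog scans; the method name printChanges and the snapshot-id parameters are illustrative, while newIncrementalChangelogScan, fromSnapshotExclusive, toSnapshot, and planFiles are part of the Iceberg scan API.

import java.io.IOException;
import org.apache.iceberg.ChangelogScanTask;
import org.apache.iceberg.IncrementalChangelogScan;
import org.apache.iceberg.Table;
import org.apache.iceberg.io.CloseableIterable;

class ChangelogScanExample {
  // Prints the change ordinal and committing snapshot id of every changelog task
  // between the two snapshots (from is exclusive, to is inclusive).
  static void printChanges(Table table, long fromIdExclusive, long toIdInclusive)
      throws IOException {
    IncrementalChangelogScan scan =
        table
            .newIncrementalChangelogScan()
            .fromSnapshotExclusive(fromIdExclusive)
            .toSnapshot(toIdInclusive);

    try (CloseableIterable<ChangelogScanTask> tasks = scan.planFiles()) {
      for (ChangelogScanTask task : tasks) {
        System.out.println(task.changeOrdinal() + " @ " + task.commitSnapshotId());
      }
    }
  }
}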