/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.dinky.shaded.paimon.table.sink;
import org.dinky.shaded.paimon.FileStore;
import org.dinky.shaded.paimon.annotation.VisibleForTesting;
import org.dinky.shaded.paimon.data.BinaryRow;
import org.dinky.shaded.paimon.data.InternalRow;
import org.dinky.shaded.paimon.disk.IOManager;
import org.dinky.shaded.paimon.io.DataFileMeta;
import org.dinky.shaded.paimon.memory.MemoryPoolFactory;
import org.dinky.shaded.paimon.memory.MemorySegmentPool;
import org.dinky.shaded.paimon.metrics.MetricRegistry;
import org.dinky.shaded.paimon.operation.AbstractFileStoreWrite;
import org.dinky.shaded.paimon.operation.FileStoreWrite;
import org.dinky.shaded.paimon.utils.Restorable;
import java.util.List;
import java.util.concurrent.ExecutorService;
import static org.dinky.shaded.paimon.utils.Preconditions.checkState;
/**
* {@link TableWrite} implementation.
*
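* <p>A rough usage sketch; how the write instance is created and how the resulting
* messages are committed are assumptions, not shown by this class:
*
* <pre>{@code
* TableWriteImpl<?> write = ...; // typically obtained through a table's write builder
* write.write(row); // routes the row to its partition and bucket
* List<CommitMessage> messages = write.prepareCommit(false, commitIdentifier);
* // hand the messages to a table commit to finish the transaction
* }</pre>
*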
* @param <T> type of record to write into {@link FileStore}.
*/
public class TableWriteImpl<T>
implements InnerTableWrite, Restorable<List<AbstractFileStoreWrite.State<T>>> {
private final AbstractFileStoreWrite<T> write;
private final KeyAndBucketExtractor<InternalRow> keyAndBucketExtractor;
private final RecordExtractor<T> recordExtractor;
private boolean batchCommitted = false;
private String tableName;
public TableWriteImpl(
FileStoreWrite<T> write,
KeyAndBucketExtractor<InternalRow> keyAndBucketExtractor,
RecordExtractor<T> recordExtractor,
String tableName) {
this.write = (AbstractFileStoreWrite<T>) write;
this.keyAndBucketExtractor = keyAndBucketExtractor;
this.recordExtractor = recordExtractor;
this.tableName = tableName;
}
@Override
public TableWriteImpl<T> withIgnorePreviousFiles(boolean ignorePreviousFiles) {
write.withIgnorePreviousFiles(ignorePreviousFiles);
return this;
}
@Override
public TableWriteImpl<T> isStreamingMode(boolean isStreamingMode) {
write.isStreamingMode(isStreamingMode);
return this;
}
@Override
public TableWriteImpl<T> withIOManager(IOManager ioManager) {
write.withIOManager(ioManager);
return this;
}
@Override
public TableWriteImpl<T> withMemoryPool(MemorySegmentPool memoryPool) {
write.withMemoryPool(memoryPool);
return this;
}
public TableWriteImpl<T> withMemoryPoolFactory(MemoryPoolFactory memoryPoolFactory) {
write.withMemoryPoolFactory(memoryPoolFactory);
return this;
}
public TableWriteImpl<T> withCompactExecutor(ExecutorService compactExecutor) {
write.withCompactExecutor(compactExecutor);
return this;
}
@Override
public BinaryRow getPartition(InternalRow row) {
keyAndBucketExtractor.setRecord(row);
return keyAndBucketExtractor.partition();
}
@Override
public int getBucket(InternalRow row) {
keyAndBucketExtractor.setRecord(row);
return keyAndBucketExtractor.bucket();
}
@Override
public void write(InternalRow row) throws Exception {
writeAndReturn(row);
}
public SinkRecord writeAndReturn(InternalRow row) throws Exception {
SinkRecord record = toSinkRecord(row);
write.write(record.partition(), record.bucket(), recordExtractor.extract(record));
return record;
}
@VisibleForTesting
public T writeAndReturnData(InternalRow row) throws Exception {
SinkRecord record = toSinkRecord(row);
T data = recordExtractor.extract(record);
write.write(record.partition(), record.bucket(), data);
return data;
}
private SinkRecord toSinkRecord(InternalRow row) {
keyAndBucketExtractor.setRecord(row);
return new SinkRecord(
keyAndBucketExtractor.partition(),
keyAndBucketExtractor.bucket(),
keyAndBucketExtractor.trimmedPrimaryKey(),
row);
}
public SinkRecord toLogRecord(SinkRecord record) {
keyAndBucketExtractor.setRecord(record.row());
return new SinkRecord(
record.partition(),
record.bucket(),
keyAndBucketExtractor.logPrimaryKey(),
record.row());
}
@Override
public void compact(BinaryRow partition, int bucket, boolean fullCompaction) throws Exception {
write.compact(partition, bucket, fullCompaction);
}
@Override
public TableWriteImpl<T> withMetricRegistry(MetricRegistry metricRegistry) {
write.withMetricRegistry(metricRegistry);
return this;
}
/**
* Notify that some new files are created at the given snapshot in the given bucket.
*
* <p>Most probably, these files are created by another job. Currently this method is only
* used by the dedicated compact job to see files created by writer jobs.
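*
* <p>A rough sketch of that dedicated-compact-job pattern; {@code snapshotId}, {@code partition},
* {@code bucket} and {@code newFiles} are assumed to come from scanning the writers' snapshots:
*
* <pre>{@code
* write.notifyNewFiles(snapshotId, partition, bucket, newFiles);
* write.compact(partition, bucket, false); // then compact the bucket as usual
* }</pre>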
*/
public void notifyNewFiles(
long snapshotId, BinaryRow partition, int bucket, List<DataFileMeta> files) {
write.notifyNewFiles(snapshotId, partition, bucket, files);
}
@Override
public List<CommitMessage> prepareCommit(boolean waitCompaction, long commitIdentifier)
throws Exception {
return write.prepareCommit(waitCompaction, commitIdentifier);
}
@Override
public List<CommitMessage> prepareCommit() throws Exception {
checkState(!batchCommitted, "BatchTableWrite only supports one-time committing.");
batchCommitted = true;
return prepareCommit(true, BatchWriteBuilder.COMMIT_IDENTIFIER);
}
@Override
public void close() throws Exception {
write.close();
}
@Override
public List<AbstractFileStoreWrite.State<T>> checkpoint() {
return write.checkpoint();
}
@Override
public void restore(List<AbstractFileStoreWrite.State<T>> state) {
write.restore(state);
}
@VisibleForTesting
public AbstractFileStoreWrite<T> getWrite() {
return write;
}
/** Extractor to extract {@link T} from the {@link SinkRecord}. */
public interface RecordExtractor<T> {
T extract(SinkRecord record);
}
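// A minimal illustration, not tied to any particular table implementation: with
// T = InternalRow (an append-only style write), an extractor can simply return the
// written row, e.g.
//
//     RecordExtractor<InternalRow> extractor = SinkRecord::row;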
}