/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hudi.table.action.commit;
import org.apache.hudi.client.WriteStatus;
import org.apache.hudi.common.config.TypedProperties;
import org.apache.hudi.common.data.HoodieListData;
import org.apache.hudi.common.engine.HoodieEngineContext;
import org.apache.hudi.common.model.HoodieKey;
import org.apache.hudi.common.model.HoodieOperation;
import org.apache.hudi.common.model.HoodieRecord;
import org.apache.hudi.common.model.HoodieRecordMerger;
import org.apache.hudi.common.model.WriteOperationType;
import org.apache.hudi.exception.HoodieException;
import org.apache.hudi.exception.HoodieUpsertException;
import org.apache.hudi.index.HoodieIndex;
import org.apache.hudi.table.HoodieTable;
import org.apache.hudi.table.action.HoodieWriteMetadata;
import org.apache.avro.Schema;
import java.io.IOException;
import java.time.Duration;
import java.time.Instant;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.stream.Collectors;
/**
 * Overrides the {@link #write} method to avoid looking up the index and partitioning the records, because
 * with {@code org.apache.hudi.operator.partitioner.BucketAssigner}, each hoodie record
 * is tagged with a bucket ID (partition path + fileID) in a streaming way. The FlinkWriteHelper only hands
 * the records over to the action executor {@link BaseCommitActionExecutor} to execute.
 *
 * <p>Computing the locations of a whole batch of records at once puts pressure on the engine
 * and should be avoided in a streaming system.
 */
public class FlinkWriteHelper<T, R> extends BaseWriteHelper<T, List<HoodieRecord<T>>,
    List<HoodieKey>, List<WriteStatus>, R> {
private FlinkWriteHelper() {
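    // The base helper expects a function that derives a shuffle parallelism from the
    // input; records reaching this helper are already bucketed by the BucketAssigner,
    // so no repartitioning is needed and the function is stubbed to return -1.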
super(ignored -> -1);
}
private static class WriteHelperHolder {
private static final FlinkWriteHelper FLINK_WRITE_HELPER = new FlinkWriteHelper();
}
public static FlinkWriteHelper newInstance() {
return WriteHelperHolder.FLINK_WRITE_HELPER;
}
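  // Usage sketch (illustrative only; instantTime, taggedRecords, context, table and
  // executor are assumed to exist at the call site, e.g. in a Flink commit action):
  //
  //   HoodieWriteMetadata<List<WriteStatus>> result = FlinkWriteHelper.newInstance()
  //       .write(instantTime, taggedRecords, context, table,
  //           false /* shouldCombine */, -1 /* configuredShuffleParallelism */,
  //           executor, WriteOperationType.UPSERT);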
@Override
  public HoodieWriteMetadata<List<WriteStatus>> write(String instantTime, List<HoodieRecord<T>> inputRecords, HoodieEngineContext context,
                                                      HoodieTable<T, List<HoodieRecord<T>>, List<HoodieKey>, List<WriteStatus>> table, boolean shouldCombine, int configuredShuffleParallelism,
                                                      BaseCommitActionExecutor<T, List<HoodieRecord<T>>, List<HoodieKey>, List<WriteStatus>, R> executor, WriteOperationType operationType) {
try {
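      // No index lookup is performed here (each record already carries its bucket ID),
      // so the duration measured below is effectively zero; it is recorded only to keep
      // the write metadata shape consistent with the other engines.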
Instant lookupBegin = Instant.now();
Duration indexLookupDuration = Duration.between(lookupBegin, Instant.now());
      HoodieWriteMetadata<List<WriteStatus>> result = executor.execute(inputRecords);
result.setIndexLookupDuration(indexLookupDuration);
return result;
} catch (Throwable e) {
if (e instanceof HoodieUpsertException) {
throw (HoodieUpsertException) e;
}
throw new HoodieUpsertException("Failed to upsert for commit time " + instantTime, e);
}
}
@Override
  protected List<HoodieRecord<T>> tag(List<HoodieRecord<T>> dedupedRecords, HoodieEngineContext context, HoodieTable<T, List<HoodieRecord<T>>, List<HoodieKey>, List<WriteStatus>> table) {
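    // Not exercised by the overridden write() above; provided to satisfy the
    // BaseWriteHelper contract. The list is wrapped in eagerly evaluated
    // HoodieListData so the engine-agnostic index API can consume it.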
return table.getIndex().tagLocation(HoodieListData.eager(dedupedRecords), context, table).collectAsList();
}
@Override
  public List<HoodieRecord<T>> deduplicateRecords(
      List<HoodieRecord<T>> records, HoodieIndex<?, ?> index, int parallelism, String schemaStr, TypedProperties props, HoodieRecordMerger merger) {
    // If the index used is global, records are expected to differ in their partitionPath,
    // so group by the record key only.
    Map<Object, List<HoodieRecord<T>>> keyedRecords = records.stream()
        .collect(Collectors.groupingBy(record -> record.getKey().getRecordKey()));

    // caution: the avro schema is not serializable
    final Schema schema = new Schema.Parser().parse(schemaStr);
    return keyedRecords.values().stream().map(x -> x.stream().reduce((rec1, rec2) -> {
      HoodieRecord<T> reducedRecord;
      try {
        reducedRecord = merger.merge(rec1, schema, rec2, schema, props).get().getLeft();
      } catch (IOException e) {
        throw new HoodieException(String.format("Error to merge two records, %s, %s", rec1, rec2), e);
      }
      // The record key and partition path must stay stable across the merge, so pick
      // them from whichever input record the merger chose.
      boolean choosePrev = rec1.getData().equals(reducedRecord.getData());
      HoodieKey reducedKey = choosePrev ? rec1.getKey() : rec2.getKey();
      HoodieOperation operation = choosePrev ? rec1.getOperation() : rec2.getOperation();
      HoodieRecord<T> hoodieRecord = reducedRecord.newInstance(reducedKey, operation);
      // Reuse the location (bucket assignment) from the first record.
      hoodieRecord.setCurrentLocation(rec1.getCurrentLocation());
      return hoodieRecord;
    }).orElse(null)).filter(Objects::nonNull).collect(Collectors.toList());
  }
}
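// Deduplication sketch (illustrative only; avroSchema and writeConfig are assumed to
// be available at the call site, and getRecordMerger() is assumed on the config):
//
//   List<HoodieRecord<T>> deduped = FlinkWriteHelper.newInstance()
//       .deduplicateRecords(records, table.getIndex(), -1 /* parallelism, unused here */,
//           avroSchema.toString(), writeConfig.getProps(), writeConfig.getRecordMerger());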