/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hudi.sink.bucket;
import org.apache.hudi.common.model.HoodieKey;
import org.apache.hudi.common.model.HoodieRecord;
import org.apache.hudi.common.model.HoodieRecordLocation;
import org.apache.hudi.common.util.Functions;
import org.apache.hudi.common.util.hash.BucketIndexUtil;
import org.apache.hudi.configuration.FlinkOptions;
import org.apache.hudi.configuration.OptionsResolver;
import org.apache.hudi.index.bucket.BucketIdentifier;
import org.apache.hudi.sink.StreamWriteFunction;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.runtime.state.FunctionInitializationContext;
import org.apache.flink.streaming.api.functions.ProcessFunction;
import org.apache.flink.util.Collector;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
/**
* A stream write function with simple bucket hash index.
*
* The task holds a fresh local index: a {(partition + bucket number) -> fileId} mapping, which
* is used for deciding whether an incoming record is an UPDATE or an INSERT.
* The index is local because different partition paths have separate items in the index.
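* For example, after bootstrapping, an index entry might look like {@code "par1" -> {1 -> "00000001-xxxx"}}
* (hypothetical partition path, bucket number and fileId prefix, for illustration only).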
*
* @param <I> the input type
*/
public class BucketStreamWriteFunction<I> extends StreamWriteFunction<I> {
private static final Logger LOG = LoggerFactory.getLogger(BucketStreamWriteFunction.class);
private int parallelism;
private int bucketNum;
private String indexKeyFields;
private boolean isNonBlockingConcurrencyControl;
/**
* BucketID to file group mapping in each partition.
* Map(partition -> Map(bucketId, fileID)).
*/
private Map<String, Map<Integer, String>> bucketIndex;
/**
* Incremental bucket index of the current checkpoint interval.
* It is needed because the bucket type ('I' or 'U') should be decided based on the committed files view,
* and all the records in one bucket should have the same bucket type.
*/
private Set<String> incBucketIndex;
/**
* Functions for calculating the task partition to dispatch.
*/
private Functions.Function2<String, Integer, Integer> partitionIndexFunc;
/**
* To avoid comparing strings for each record, this flag is resolved once during open().
*/
private boolean isInsertOverwrite;
/**
* Constructs a BucketStreamWriteFunction.
*
* @param config The config options
*/
public BucketStreamWriteFunction(Configuration config) {
super(config);
}
@Override
public void open(Configuration parameters) throws IOException {
super.open(parameters);
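// resolve the bucket index options: total bucket number, index key fields and concurrency control mode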
this.bucketNum = config.getInteger(FlinkOptions.BUCKET_INDEX_NUM_BUCKETS);
this.indexKeyFields = OptionsResolver.getIndexKeyField(config);
this.isNonBlockingConcurrencyControl = OptionsResolver.isNonBlockingConcurrencyControl(config);
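// the subtask id and sink parallelism decide which buckets this task is responsible for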
this.taskID = getRuntimeContext().getIndexOfThisSubtask();
this.parallelism = getRuntimeContext().getNumberOfParallelSubtasks();
this.bucketIndex = new HashMap<>();
this.incBucketIndex = new HashSet<>();
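// function used to calculate which task a (partition, bucket number) pair dispatches to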
this.partitionIndexFunc = BucketIndexUtil.getPartitionIndexFunc(bucketNum, parallelism);
this.isInsertOverwrite = OptionsResolver.isInsertOverwrite(config);
}
@Override
public void initializeState(FunctionInitializationContext context) throws Exception {
super.initializeState(context);
}
@Override
public void snapshotState() {
super.snapshotState();
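// clear the incremental bucket index so that the next checkpoint interval starts from the committed files view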
this.incBucketIndex.clear();
}
@Override
public void processElement(I i, ProcessFunction<I, Object>.Context context, Collector