org.apache.iceberg.connect.IcebergSinkTask (from the iceberg-kafka-connect artifact: "A table format for huge analytic datasets")
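What follows is the source of IcebergSinkTask, the Kafka Connect SinkTask implementation shipped in this artifact. The task parses its configuration in start(), opens an Iceberg Catalog and a Committer when partitions are assigned in open(), and hands each batch of SinkRecords to the Committer in put(). A hedged lifecycle sketch follows the listing.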
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.iceberg.connect;

import java.util.Collection;
import java.util.Map;
import org.apache.iceberg.catalog.Catalog;
import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.connect.sink.SinkRecord;
import org.apache.kafka.connect.sink.SinkTask;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class IcebergSinkTask extends SinkTask {

  private static final Logger LOG = LoggerFactory.getLogger(IcebergSinkTask.class);

  private IcebergSinkConfig config;
  private Catalog catalog;
  private Committer committer;

  @Override
  public String version() {
    return IcebergSinkConfig.version();
  }

  // Called once when the task starts; props is the string-to-string
  // configuration passed in by the Kafka Connect worker.
  @Override
  public void start(Map<String, String> props) {
    this.config = new IcebergSinkConfig(props);
  }

  // Called when the worker assigns topic partitions; the preconditions
  // ensure the catalog and committer are opened only once per assignment.
  @Override
  public void open(Collection<TopicPartition> partitions) {
    Preconditions.checkArgument(catalog == null, "Catalog already open");
    Preconditions.checkArgument(committer == null, "Committer already open");

    catalog = CatalogUtils.loadCatalog(config);
    committer = CommitterFactory.createCommitter(config);
    committer.start(catalog, config, context);
  }

  @Override
  public void close(Collection<TopicPartition> partitions) {
    close();
  }

  // Idempotent teardown shared by close(partitions) and stop().
  private void close() {
    if (committer != null) {
      committer.stop();
      committer = null;
    }

    if (catalog != null) {
      if (catalog instanceof AutoCloseable) {
        try {
          ((AutoCloseable) catalog).close();
        } catch (Exception e) {
          LOG.warn("An error occurred closing catalog instance, ignoring...", e);
        }
      }
      catalog = null;
    }
  }

  @Override
  public void put(Collection<SinkRecord> sinkRecords) {
    Preconditions.checkNotNull(committer, "Committer wasn't initialized");
    committer.save(sinkRecords);
  }

  // flush() invokes the committer with no new records (null), letting it
  // make progress on any pending commit work.
  @Override
  public void flush(Map<TopicPartition, OffsetAndMetadata> currentOffsets) {
    Preconditions.checkNotNull(committer, "Committer wasn't initialized");
    committer.save(null);
  }

  @Override
  public Map<TopicPartition, OffsetAndMetadata> preCommit(
      Map<TopicPartition, OffsetAndMetadata> currentOffsets) {
    // offset commit is handled by the worker
    return ImmutableMap.of();
  }

  @Override
  public void stop() {
    close();
  }
}
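For orientation, here is a minimal sketch of how the Kafka Connect runtime drives this task. The lifecycle methods (initialize, start, open, put, flush, close, stop) are the framework's own; the iceberg.* configuration keys shown follow the connector's documented naming, but the specific keys and values here are illustrative assumptions, not a verified configuration.

import java.util.HashMap;
import java.util.Map;
import org.apache.iceberg.connect.IcebergSinkTask;

// A minimal sketch, assuming standalone instantiation for illustration only;
// in practice the Kafka Connect worker creates and drives the task.
public class IcebergSinkTaskSketch {
  public static void main(String[] args) {
    Map<String, String> props = new HashMap<>();
    props.put("iceberg.tables", "db.events");                  // assumed target table
    props.put("iceberg.catalog.type", "rest");                 // assumed catalog type
    props.put("iceberg.catalog.uri", "http://localhost:8181"); // assumed endpoint

    IcebergSinkTask task = new IcebergSinkTask();
    task.start(props); // parses props into an IcebergSinkConfig

    // The worker calls task.initialize(context) before start(), then
    // task.open(partitions) once partitions are assigned, followed by repeated
    // task.put(records) / task.flush(offsets) cycles, and finally
    // task.close(partitions) and task.stop(). open() requires the
    // SinkTaskContext, so it is not exercised in this standalone sketch.
  }
}

Note that put() and flush() fail fast via Preconditions.checkNotNull if the committer has not been opened, so delivering records before open() surfaces as a programming error rather than a silent drop.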