/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.component.aws2.kinesis;
import java.util.*;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.camel.AsyncCallback;
import org.apache.camel.Exchange;
import org.apache.camel.Processor;
import org.apache.camel.component.aws2.kinesis.consumer.KinesisResumeAction;
import org.apache.camel.resume.ResumeAction;
import org.apache.camel.resume.ResumeActionAware;
import org.apache.camel.resume.ResumeAware;
import org.apache.camel.resume.ResumeStrategy;
import org.apache.camel.support.ScheduledBatchPollingConsumer;
import org.apache.camel.util.CastUtils;
import org.apache.camel.util.ObjectHelper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.amazon.awssdk.services.kinesis.model.DescribeStreamRequest;
import software.amazon.awssdk.services.kinesis.model.DescribeStreamResponse;
import software.amazon.awssdk.services.kinesis.model.GetRecordsRequest;
import software.amazon.awssdk.services.kinesis.model.GetRecordsResponse;
import software.amazon.awssdk.services.kinesis.model.GetShardIteratorRequest;
import software.amazon.awssdk.services.kinesis.model.GetShardIteratorResponse;
import software.amazon.awssdk.services.kinesis.model.ListShardsRequest;
import software.amazon.awssdk.services.kinesis.model.Record;
import software.amazon.awssdk.services.kinesis.model.Shard;
import software.amazon.awssdk.services.kinesis.model.ShardIteratorType;
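/**
 * Consumer that polls AWS Kinesis shards for records and turns them into Camel exchanges.
 * When a shardId is configured only that shard is consumed; otherwise every shard of the
 * stream is polled in parallel. The nextShardIterator returned by each GetRecords call is
 * cached per shard so that the following poll resumes where the previous one left off.
 *
 * A typical endpoint URI might look like (illustrative values, not taken from this file):
 * {@code aws2-kinesis://myStream?shardId=shardId-000000000000&maxResultsPerRequest=100}
 */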
public class Kinesis2Consumer extends ScheduledBatchPollingConsumer implements ResumeAware<ResumeStrategy> {
private static final Logger LOG = LoggerFactory.getLogger(Kinesis2Consumer.class);
private KinesisConnection connection;
private ResumeStrategy resumeStrategy;
    private final Map<String, String> currentShardIterators = new HashMap<>();
    private final Set<String> warnLogged = new HashSet<>();
    private volatile List<Shard> currentShardList = List.of();
private static final String SHARD_MONITOR_EXECUTOR_NAME = "Kinesis_shard_monitor";
private ScheduledExecutorService shardMonitorExecutor;
public Kinesis2Consumer(Kinesis2Endpoint endpoint,
Processor processor) {
super(endpoint, processor);
}
public KinesisConnection getConnection() {
return connection;
}
public void setConnection(KinesisConnection connection) {
this.connection = connection;
}
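    /**
     * A shard is considered closed once it has been seen before (its shardId is present as a
     * key) but the last nextShardIterator stored for it was null, which is what Kinesis returns
     * when a shard has been closed (for example after a resharding).
     */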
public boolean isShardClosed(String shardId) {
return currentShardIterators.get(shardId) == null && currentShardIterators.containsKey(shardId);
}
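    /**
     * One poll cycle. If a shardId is configured, the stream is described and only that shard
     * is fetched (skipping it once it is known to be closed); otherwise all shards returned by
     * {@link #getCurrentShardList()} are fetched in parallel. Returns the processed exchange
     * count for this cycle.
     */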
@Override
protected int poll() throws Exception {
var processedExchangeCount = new AtomicInteger(0);
String shardId = getEndpoint().getConfiguration().getShardId();
        if (ObjectHelper.isNotEmpty(shardId)) {
// skip if the shard is closed
if (isShardClosed(shardId)) {
// There was previously a shardIterator but shard is now closed
handleClosedShard(shardId);
return 0;
}
var request = DescribeStreamRequest
.builder()
.streamName(getEndpoint().getConfiguration().getStreamName())
.build();
DescribeStreamResponse response;
if (getEndpoint().getConfiguration().isAsyncClient()) {
try {
response = connection
.getAsyncClient(getEndpoint())
.describeStream(request)
.get();
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
return 0;
} catch (ExecutionException e) {
throw new RuntimeException(e);
}
} else {
response = connection
.getClient(getEndpoint())
.describeStream(request);
}
var shard = response
.streamDescription()
.shards()
.stream()
.filter(shardItem -> shardItem
.shardId()
.equalsIgnoreCase(getEndpoint()
.getConfiguration()
.getShardId()))
.findFirst()
.orElseThrow(() -> new IllegalStateException("The shard can't be found"));
fetchAndPrepareRecordsForCamel(shard, connection, processedExchangeCount);
} else {
getCurrentShardList()
.parallelStream()
.forEach(shard -> fetchAndPrepareRecordsForCamel(shard, connection, processedExchangeCount));
}
        // we received a response from AWS, so mark the consumer as ready
forceConsumerAsReady();
return processedExchangeCount.get();
}
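    /**
     * Fetches records from a single shard: resolves a shard iterator, issues a GetRecords
     * request (via the async or sync client, depending on configuration), converts the returned
     * records into exchanges, processes them as a batch and finally stores the nextShardIterator
     * for the next poll.
     */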
private void fetchAndPrepareRecordsForCamel(
final Shard shard,
final KinesisConnection kinesisConnection,
AtomicInteger processedExchangeCount) {
String shardIterator;
try {
shardIterator = getShardIterator(shard, kinesisConnection);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new RuntimeException(e);
} catch (ExecutionException e) {
throw new RuntimeException(e);
}
if (shardIterator == null) {
            // Unable to get an iterator, so the shard must be closed
processedExchangeCount.set(0);
return;
}
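        // read up to maxResultsPerRequest records starting at the current iterator position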
GetRecordsRequest req = GetRecordsRequest
.builder()
.shardIterator(shardIterator)
.limit(getEndpoint()
.getConfiguration()
.getMaxResultsPerRequest())
.build();
GetRecordsResponse result;
if (getEndpoint().getConfiguration().isAsyncClient()) {
try {
result = kinesisConnection
.getAsyncClient(getEndpoint())
.getRecords(req)
.get();
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new RuntimeException(e);
} catch (ExecutionException e) {
throw new RuntimeException(e);
}
} else {
result = kinesisConnection
.getClient(getEndpoint())
.getRecords(req);
}
try {
            Queue<Exchange> exchanges = createExchanges(shard, result.records());
processedExchangeCount.getAndSet(processBatch(CastUtils.cast(exchanges)));
} catch (Exception e) {
throw new RuntimeException(e);
}
        // We could cache the last successful sequence number and pass it to the getRecords
        // request so that the next poll starts from where we left off; however, it is unclear
        // what happens to subsequent exchanges when an earlier exchange fails.
updateShardIterator(shard, result.nextShardIterator());
}
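    /**
     * Remembers the nextShardIterator for a shard. A null value is kept on purpose: together
     * with the key it marks the shard as closed (see {@link #isShardClosed(String)}).
     */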
private void updateShardIterator(Shard shard, String nextShardIterator) {
currentShardIterators.put(shard.shardId(), nextShardIterator);
}
@Override
    public int processBatch(Queue<Object> exchanges) throws Exception {
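        // Minimal sketch of the standard Camel batch-processing loop (the original body is not
        // shown in this listing): drain the queue, stamp the usual batch properties on each
        // exchange, hand it to the processor and report how many exchanges were processed.
        int total = exchanges.size();
        int processed = 0;
        while (!exchanges.isEmpty()) {
            final Exchange exchange = (Exchange) exchanges.poll();
            exchange.setProperty(Exchange.BATCH_INDEX, processed);
            exchange.setProperty(Exchange.BATCH_SIZE, total);
            exchange.setProperty(Exchange.BATCH_COMPLETE, processed == total - 1);
            // expose how many exchanges are still queued for this batch
            pendingExchanges = total - processed - 1;
            getProcessor().process(exchange);
            processed++;
        }
        return processed;
    }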