
/*
* Copyright 2016 Red Hat Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.vertx.kafka.client.consumer;
import io.vertx.codegen.annotations.Nullable;
import io.vertx.core.AsyncResult;
import io.vertx.core.Future;
import io.vertx.core.Handler;
import io.vertx.core.Vertx;
import io.vertx.core.streams.ReadStream;
import io.vertx.kafka.client.common.KafkaClientOptions;
import io.vertx.kafka.client.consumer.impl.KafkaReadStreamImpl;
import io.vertx.kafka.client.serialization.VertxSerdes;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.clients.consumer.OffsetAndTimestamp;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.Deserializer;
import java.time.Duration;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.regex.Pattern;
/**
* A {@link ReadStream} for consuming Kafka {@link ConsumerRecord}s.
*
* The {@link #pause()} and {@link #resume()} methods provide global control over reading records from the consumer.
*
* The {@link #pause(Set)} and {@link #resume(Set)} methods provide finer-grained control over reading records
* from specific topic partitions; these map to Kafka-specific operations.
*
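* <p>A minimal usage sketch (the broker address, group id and topic name below are illustrative placeholders, not part of this API):
* <pre>{@code
* Map<String, Object> config = new HashMap<>();
* config.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
* config.put(ConsumerConfig.GROUP_ID_CONFIG, "my-group");
* config.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
* config.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
*
* KafkaReadStream<String, String> stream = KafkaReadStream.create(vertx, config);
* stream.handler(record -> System.out.println(record.topic() + ": " + record.value()));
* stream.subscribe(Collections.singleton("my-topic"));
* }</pre>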
*/
public interface KafkaReadStream<K, V> extends ReadStream<ConsumerRecord<K, V>> {
@Override
KafkaReadStream<K, V> exceptionHandler(Handler<Throwable> handler);
@Override
KafkaReadStream<K, V> handler(@Nullable Handler<ConsumerRecord<K, V>> handler);
@Override
KafkaReadStream<K, V> pause();
@Override
KafkaReadStream<K, V> resume();
@Override
KafkaReadStream<K, V> fetch(long amount);
@Override
KafkaReadStream<K, V> endHandler(@Nullable Handler<Void> endHandler);
/**
* Returns the current demand.
*
* <ul>
* <li>If the stream is in <i>flowing</i> mode, this returns {@link Long#MAX_VALUE}.</li>
* <li>If the stream is in <i>fetch</i> mode, this returns the number of elements still to be delivered, or 0 if the stream is paused.</li>
* </ul>
*
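* <p>A sketch of how demand evolves in fetch mode (assuming an already-created {@code stream}):
* <pre>{@code
* stream.pause();                // enter fetch mode: demand becomes 0
* stream.fetch(10);              // request up to 10 more elements
* long demand = stream.demand(); // 10, decreasing as records are delivered
* }</pre>
*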
* @return current demand
*/
long demand();
/**
* Create a new KafkaReadStream instance
*
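* <p>For example (the broker address and group id are illustrative):
* <pre>{@code
* Properties config = new Properties();
* config.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
* config.put(ConsumerConfig.GROUP_ID_CONFIG, "my-group");
* config.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
* config.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
* KafkaReadStream<String, String> stream = KafkaReadStream.create(vertx, config);
* }</pre>
*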
* @param vertx Vert.x instance to use
* @param config Kafka consumer configuration
* @return an instance of the KafkaReadStream
*/
static <K, V> KafkaReadStream<K, V> create(Vertx vertx, Properties config) {
return new KafkaReadStreamImpl<>(
vertx,
new org.apache.kafka.clients.consumer.KafkaConsumer<>(config),
KafkaClientOptions.fromProperties(config, false));
}
/**
* Create a new KafkaReadStream instance
*
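* <p>For example, letting {@link VertxSerdes} supply the deserializers (a sketch; the config values are illustrative):
* <pre>{@code
* Properties config = new Properties();
* config.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
* config.put(ConsumerConfig.GROUP_ID_CONFIG, "my-group");
* KafkaReadStream<String, JsonObject> stream =
*   KafkaReadStream.create(vertx, config, String.class, JsonObject.class);
* }</pre>
*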
* @param vertx Vert.x instance to use
* @param config Kafka consumer configuration
* @param keyType class type for the key deserialization
* @param valueType class type for the value deserialization
* @return an instance of the KafkaReadStream
*/
static <K, V> KafkaReadStream<K, V> create(Vertx vertx, Properties config, Class<K> keyType, Class<V> valueType) {
Deserializer<K> keyDeserializer = VertxSerdes.serdeFrom(keyType).deserializer();
Deserializer<V> valueDeserializer = VertxSerdes.serdeFrom(valueType).deserializer();
return create(vertx, config, keyDeserializer, valueDeserializer);
}
/**
* Create a new KafkaReadStream instance
*
* @param vertx Vert.x instance to use
* @param config Kafka consumer configuration
* @param keyDeserializer key deserializer
* @param valueDeserializer value deserializer
* @return an instance of the KafkaReadStream
*/
static <K, V> KafkaReadStream<K, V> create(Vertx vertx, Properties config, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer) {
return new KafkaReadStreamImpl<>(
vertx,
new org.apache.kafka.clients.consumer.KafkaConsumer<>(config, keyDeserializer, valueDeserializer),
KafkaClientOptions.fromProperties(config, false));
}
/**
* Create a new KafkaReadStream instance
*
* @param vertx Vert.x instance to use
* @param config Kafka consumer configuration
* @return an instance of the KafkaReadStream
*/
static <K, V> KafkaReadStream<K, V> create(Vertx vertx, Map<String, Object> config) {
return new KafkaReadStreamImpl<>(
vertx,
new org.apache.kafka.clients.consumer.KafkaConsumer<>(config),
KafkaClientOptions.fromMap(config, false));
}
/**
* Create a new KafkaReadStream instance
*
* @param vertx Vert.x instance to use
* @param config Kafka consumer configuration
* @param keyType class type for the key deserialization
* @param valueType class type for the value deserialization
* @return an instance of the KafkaReadStream
*/
static <K, V> KafkaReadStream<K, V> create(Vertx vertx, Map<String, Object> config, Class<K> keyType, Class<V> valueType) {
Deserializer<K> keyDeserializer = VertxSerdes.serdeFrom(keyType).deserializer();
Deserializer<V> valueDeserializer = VertxSerdes.serdeFrom(valueType).deserializer();
return create(vertx, config, keyDeserializer, valueDeserializer);
}
/**
* Create a new KafkaReadStream instance
*
* @param vertx Vert.x instance to use
* @param config Kafka consumer configuration
* @param keyDeserializer key deserializer
* @param valueDeserializer value deserializer
* @return an instance of the KafkaReadStream
*/
static <K, V> KafkaReadStream<K, V> create(Vertx vertx, Map<String, Object> config, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer) {
return new KafkaReadStreamImpl<>(
vertx,
new org.apache.kafka.clients.consumer.KafkaConsumer<>(config, keyDeserializer, valueDeserializer),
KafkaClientOptions.fromMap(config, false));
}
/**
* Create a new KafkaReadStream instance
*
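* <p>A sketch (assuming the fluent {@code KafkaClientOptions.setConfig} setter; the config values are illustrative):
* <pre>{@code
* Map<String, Object> config = new HashMap<>();
* config.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
* config.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
* config.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
* KafkaClientOptions options = new KafkaClientOptions().setConfig(config);
* KafkaReadStream<String, String> stream = KafkaReadStream.create(vertx, options);
* }</pre>
*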
* @param vertx Vert.x instance to use
* @param options Kafka consumer options
* @return an instance of the KafkaReadStream
*/
static <K, V> KafkaReadStream<K, V> create(Vertx vertx, KafkaClientOptions options) {
Map<String, Object> config = new HashMap<>();
if (options.getConfig() != null) {
config.putAll(options.getConfig());
}
return new KafkaReadStreamImpl<>(vertx, new org.apache.kafka.clients.consumer.KafkaConsumer<>(config), options);
}
/**
* Create a new KafkaReadStream instance
*
* @param vertx Vert.x instance to use
* @param options Kafka consumer options
* @param keyType class type for the key deserialization
* @param valueType class type for the value deserialization
* @return an instance of the KafkaReadStream
*/
static <K, V> KafkaReadStream<K, V> create(Vertx vertx, KafkaClientOptions options, Class<K> keyType, Class<V> valueType) {
Deserializer<K> keyDeserializer = VertxSerdes.serdeFrom(keyType).deserializer();
Deserializer<V> valueDeserializer = VertxSerdes.serdeFrom(valueType).deserializer();
return create(vertx, options, keyDeserializer, valueDeserializer);
}
/**
* Create a new KafkaReadStream instance
*
* @param vertx Vert.x instance to use
* @param options Kafka consumer options
* @param keyDeserializer key deserializer
* @param valueDeserializer value deserializer
* @return an instance of the KafkaReadStream
*/
static <K, V> KafkaReadStream<K, V> create(Vertx vertx, KafkaClientOptions options, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer) {
Map<String, Object> config = new HashMap<>();
if (options.getConfig() != null) {
config.putAll(options.getConfig());
}
return new KafkaReadStreamImpl<>(
vertx,
new org.apache.kafka.clients.consumer.KafkaConsumer<>(config, keyDeserializer, valueDeserializer),
options);
}
/**
* Create a new KafkaReadStream instance
*
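* <p>This is useful to wrap a pre-configured or mock consumer, e.g. (a sketch; {@code config} is as in the other factory methods):
* <pre>{@code
* Consumer<String, String> nativeConsumer = new org.apache.kafka.clients.consumer.KafkaConsumer<>(config);
* KafkaReadStream<String, String> stream = KafkaReadStream.create(vertx, nativeConsumer);
* }</pre>
*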
* @param vertx Vert.x instance to use
* @param consumer native Kafka consumer instance
* @return an instance of the KafkaReadStream
*/
static <K, V> KafkaReadStream<K, V> create(Vertx vertx, Consumer<K, V> consumer) {
return new KafkaReadStreamImpl<>(vertx, consumer, new KafkaClientOptions());
}
/**
* Get the last committed offset for the given partition (whether the commit happened by this process or another).
*
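* <p>For example (the topic name and partition are illustrative):
* <pre>{@code
* stream.committed(new TopicPartition("my-topic", 0), ar -> {
*   if (ar.succeeded()) {
*     OffsetAndMetadata committed = ar.result();
*     System.out.println("last committed offset: " + committed.offset());
*   }
* });
* }</pre>
*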
* @param topicPartition topic partition for getting last committed offset
* @param handler handler called on operation completed
*/
void committed(TopicPartition topicPartition, Handler<AsyncResult<OffsetAndMetadata>> handler);
/**
* Like {@link #committed(TopicPartition, Handler)} but returns a {@code Future} of the asynchronous result
*/
Future<OffsetAndMetadata> committed(TopicPartition topicPartition);
/**
* Suspend fetching from the requested partitions.
*
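* <p>For example, to suspend a single partition while the others keep flowing (a sketch; {@code onSuccess} assumes the Vert.x 4 {@code Future} API and the names are illustrative):
* <pre>{@code
* TopicPartition tp = new TopicPartition("my-topic", 0);
* stream.pause(Collections.singleton(tp))
*   .onSuccess(v -> System.out.println("partition paused"));
* }</pre>
*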
* @param topicPartitions topic partitions from which to suspend fetching
* @return a {@code Future} completed with the operation result
*/
Future<Void> pause(Set<TopicPartition> topicPartitions);
/**
* Suspend fetching from the requested partitions.
*
* Due to internal buffering of messages,
* the {@linkplain #handler(Handler) record handler} will
* continue to observe messages from the given {@code topicPartitions}
* until some time after the given {@code completionHandler}
* is called. In contrast, once the given {@code completionHandler}
* is called, the {@link #batchHandler(Handler)} will not see messages
* from the given {@code topicPartitions}.
*
* @param topicPartitions topic partitions from which to suspend fetching
* @param completionHandler handler called on operation completed
* @return current KafkaReadStream instance
*/
KafkaReadStream<K, V> pause(Set<TopicPartition> topicPartitions, Handler<AsyncResult<Void>> completionHandler);
/**
* Get the set of partitions that were previously paused by a call to {@link #pause(Set)}.
*
* @param handler handler called on operation completed
*/
void paused(Handler<AsyncResult<Set<TopicPartition>>> handler);
/**
* Like {@link #paused(Handler)} but returns a {@code Future} of the asynchronous result
*/
Future<Set<TopicPartition>> paused();
/**
* Resume the specified partitions which have been paused with {@link #pause(Set)}.
*
* @param topicPartitions topic partitions from which to resume fetching
* @return a {@code Future} completed with the operation result
*/
Future<Void> resume(Set<TopicPartition> topicPartitions);
/**
* Resume the specified partitions which have been paused with {@link #pause(Set)}.
*
* @param topicPartitions topic partitions from which to resume fetching
* @param completionHandler handler called on operation completed
* @return current KafkaReadStream instance
*/
KafkaReadStream<K, V> resume(Set<TopicPartition> topicPartitions, Handler<AsyncResult<Void>> completionHandler);
/**
* Seek to the last offset for each of the given partitions.
*
* @param topicPartitions topic partitions for which to seek to the last offset
* @return a {@code Future} completed with the operation result
*/
Future<Void> seekToEnd(Set<TopicPartition> topicPartitions);
/**
* Seek to the last offset for each of the given partitions.
*
* Due to internal buffering of messages,
* the {@linkplain #handler(Handler) record handler} will
* continue to observe messages fetched with respect to the old offset
* until some time after the given {@code completionHandler}
* is called. In contrast, once the given {@code completionHandler}
* is called, the {@link #batchHandler(Handler)} will only see messages
* consistent with the new offset.
*
* @param topicPartitions topic partitions for which to seek to the last offset
* @param completionHandler handler called on operation completed
* @return current KafkaReadStream instance
*/
KafkaReadStream<K, V> seekToEnd(Set<TopicPartition> topicPartitions, Handler<AsyncResult<Void>> completionHandler);
/**
* Seek to the first offset for each of the given partitions.
*
* @param topicPartitions topic partitions for which to seek to the first offset
* @return a {@code Future} completed with the operation result
*/
Future<Void> seekToBeginning(Set<TopicPartition> topicPartitions);
/**
* Seek to the first offset for each of the given partitions.
*
* Due to internal buffering of messages,
* the {@linkplain #handler(Handler) record handler} will
* continue to observe messages fetched with respect to the old offset
* until some time after the given {@code completionHandler}
* is called. In contrast, once the given {@code completionHandler}
* is called, the {@link #batchHandler(Handler)} will only see messages
* consistent with the new offset.
*
* @param topicPartitions topic partitions for which to seek to the first offset
* @param completionHandler handler called on operation completed
* @return current KafkaReadStream instance
*/
KafkaReadStream<K, V> seekToBeginning(Set<TopicPartition> topicPartitions, Handler<AsyncResult<Void>> completionHandler);
/**
* Overrides the fetch offsets that the consumer will use on the next poll.
*
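* <p>For example, to re-read a partition from the beginning of its log (a sketch; the topic name is illustrative and {@code onSuccess} assumes the Vert.x 4 {@code Future} API):
* <pre>{@code
* TopicPartition tp = new TopicPartition("my-topic", 0);
* stream.seek(tp, 0L)
*   .onSuccess(v -> System.out.println("next poll starts at offset 0"));
* }</pre>
*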
* @param topicPartition topic partition in which to seek
* @param offset offset to seek inside the topic partition
* @return a {@code Future} completed with the operation result
*/
Future<Void> seek(TopicPartition topicPartition, long offset);
/**
* Overrides the fetch offsets that the consumer will use on the next poll.
*
* Due to internal buffering of messages,
* the {@linkplain #handler(Handler) record handler} will
* continue to observe messages fetched with respect to the old offset
* until some time after the given {@code completionHandler}
* is called. In contrast, once the given {@code completionHandler}
* is called, the {@link #batchHandler(Handler)} will only see messages
* consistent with the new offset.
*
* @param topicPartition topic partition in which to seek
* @param offset offset to seek inside the topic partition
* @param completionHandler handler called on operation completed
* @return current KafkaReadStream instance
*/
KafkaReadStream<K, V> seek(TopicPartition topicPartition, long offset, Handler<AsyncResult<Void>> completionHandler);
/**
* Set the handler called when topic partitions are revoked from the consumer
*
* @param handler handler called on revoked topic partitions
* @return current KafkaReadStream instance
*/
KafkaReadStream<K, V> partitionsRevokedHandler(Handler<Set<TopicPartition>> handler);
/**
* Set the handler called when topic partitions are assigned to the consumer
*
* @param handler handler called on assigned topic partitions
* @return current KafkaReadStream instance
*/
KafkaReadStream<K, V> partitionsAssignedHandler(Handler<Set<TopicPartition>> handler);
/**
* Subscribe to the given list of topics to get dynamically assigned partitions.
*
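* <p>For example (the topic name is illustrative):
* <pre>{@code
* stream
*   .handler(record -> System.out.println(record.value()))
*   .subscribe(Collections.singleton("my-topic"));
* }</pre>
*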
* @param topics topics to subscribe to
* @return a {@code Future} completed with the operation result
*/
Future<Void> subscribe(Set<String> topics);
/**
* Subscribe to the given list of topics to get dynamically assigned partitions.
*
* Due to internal buffering of messages, when changing the subscribed topics
* the old set of topics may remain in effect
* (as observed by the {@linkplain #handler(Handler) record handler})
* until some time after the given {@code completionHandler}
* is called. In contrast, once the given {@code completionHandler}
* is called, the {@link #batchHandler(Handler)} will only see messages
* consistent with the new set of topics.
*
* @param topics topics to subscribe to
* @param completionHandler handler called on operation completed
* @return current KafkaReadStream instance
*/
KafkaReadStream<K, V> subscribe(Set<String> topics, Handler<AsyncResult<Void>> completionHandler);
/**
* Subscribe to all topics matching specified pattern to get dynamically assigned partitions.
*
* Due to internal buffering of messages, when changing the subscribed topics
* the old set of topics may remain in effect
* (as observed by the {@linkplain #handler(Handler) record handler})
* until some time after the given {@code completionHandler}
* is called. In contrast, once the given {@code completionHandler}
* is called, the {@link #batchHandler(Handler)} will only see messages
* consistent with the new set of topics.
*
* @param pattern Pattern to subscribe to
* @param completionHandler handler called on operation completed
* @return current KafkaReadStream instance
*/
KafkaReadStream<K, V> subscribe(Pattern pattern, Handler<AsyncResult<Void>> completionHandler);
/**
* Subscribe to all topics matching specified pattern to get dynamically assigned partitions.
*
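* <p>For example, to consume every topic under an illustrative {@code products.} prefix (a sketch; {@code onSuccess} assumes the Vert.x 4 {@code Future} API):
* <pre>{@code
* Pattern pattern = Pattern.compile("products\\..+");
* stream.subscribe(pattern)
*   .onSuccess(v -> System.out.println("subscribed"));
* }</pre>
*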
* @param pattern Pattern to subscribe to
* @return a {@code Future} completed with the operation result
*/
Future<Void> subscribe(Pattern pattern);
/**
* Unsubscribe from topics currently subscribed with subscribe.
*
* @return a {@code Future} completed with the operation result
*/
Future<Void> unsubscribe();
/**
* Unsubscribe from topics currently subscribed with subscribe.
*
* @param completionHandler handler called on operation completed
* @return current KafkaReadStream instance
*/
KafkaReadStream<K, V> unsubscribe(Handler<AsyncResult<Void>> completionHandler);
/**
* Get the current subscription.
*
* @param handler handler called on operation completed
* @return current KafkaReadStream instance
*/
KafkaReadStream<K, V> subscription(Handler<AsyncResult<Set<String>>> handler);
/**
* Like {@link #subscription(Handler)} but returns a {@code Future} of the asynchronous result
*/
Future<Set<String>> subscription();
/**
* Manually assign a set of partitions to this consumer.
*
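* <p>For example, to read two fixed partitions without joining a consumer group (a sketch; the topic name is illustrative and {@code onSuccess} assumes the Vert.x 4 {@code Future} API):
* <pre>{@code
* Set<TopicPartition> partitions = new HashSet<>();
* partitions.add(new TopicPartition("my-topic", 0));
* partitions.add(new TopicPartition("my-topic", 1));
* stream.assign(partitions)
*   .onSuccess(v -> System.out.println("partitions assigned"));
* }</pre>
*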
* @param partitions partitions to assign
* @return a {@code Future} completed with the operation result
*/
Future<Void> assign(Set<TopicPartition> partitions);
/**
* Manually assign a set of partitions to this consumer.
*
* Due to internal buffering of messages, when reassigning
* the old set of partitions may remain in effect
* (as observed by the {@linkplain #handler(Handler) record handler})
* until some time after the given {@code completionHandler}
* is called. In contrast, once the given {@code completionHandler}
* is called, the {@link #batchHandler(Handler)} will only see messages
* consistent with the new set of partitions.
*
* @param partitions partitions to assign
* @param completionHandler handler called on operation completed
* @return current KafkaReadStream instance
*/
KafkaReadStream<K, V> assign(Set<TopicPartition> partitions, Handler<AsyncResult<Void>> completionHandler);
/**
* Get the set of partitions currently assigned to this consumer.
*
* @param handler handler called on operation completed
* @return current KafkaReadStream instance
*/
KafkaReadStream<K, V> assignment(Handler<AsyncResult<Set<TopicPartition>>> handler);
/**
* Like {@link #assignment(Handler)} but returns a {@code Future} of the asynchronous result
*/
Future<Set<TopicPartition>> assignment();
/**
* Get metadata about partitions for all topics that the user is authorized to view.
*
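* <p>For example (a sketch, assuming an already-created {@code stream}):
* <pre>{@code
* stream.listTopics(ar -> {
*   if (ar.succeeded()) {
*     ar.result().forEach((topic, partitions) ->
*       System.out.println(topic + " has " + partitions.size() + " partitions"));
*   }
* });
* }</pre>
*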
* @param handler handler called on operation completed
* @return current KafkaReadStream instance
*/
KafkaReadStream<K, V> listTopics(Handler<AsyncResult<Map<String, List<PartitionInfo>>>> handler);
/**
* Like {@link #listTopics(Handler)} but returns a {@code Future} of the asynchronous result
*/
Future<Map<String, List<PartitionInfo>>> listTopics();