package com.github.ddth.kafka.internal;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
import org.apache.kafka.clients.consumer.CommitFailedException;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.PartitionInfo;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.github.ddth.kafka.IKafkaMessageListener;
import com.github.ddth.kafka.KafkaMessage;
import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;
/**
* A simple Kafka consumer client.
*
* <p>
* Each {@link KafkaMsgConsumer} is associated with a unique consumer-group-id.
* </p>
*
* <p>
* A single {@link KafkaMsgConsumer} can consume messages from multiple topics.
* </p>
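*
* <p>
* Usage sketch (illustrative only; the server address, group-id, topic name
* and wait-time below are placeholders):
* </p>
*
* <pre>{@code
* KafkaMsgConsumer consumer = new KafkaMsgConsumer("localhost:9092", "my-group", true);
* consumer.init();
* KafkaMessage msg = consumer.consume("my-topic", 5000, TimeUnit.MILLISECONDS);
* consumer.destroy();
* }</pre>
*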
* @author Thanh Ba Nguyen
* @since 1.0.0
*/
public class KafkaMsgConsumer {
private final Logger LOGGER = LoggerFactory.getLogger(KafkaMsgConsumer.class);
private String consumerGroupId;
private boolean consumeFromBeginning = false;
/* Mapping {topic -> KafkaConsumer} */
private ConcurrentMap<String, KafkaConsumer<String, byte[]>> topicConsumers = new ConcurrentHashMap<String, KafkaConsumer<String, byte[]>>();
/* Mapping {topic -> BlockingQueue} */
private ConcurrentMap<String, BlockingQueue<ConsumerRecord<String, byte[]>>> topicBuffers = new ConcurrentHashMap<String, BlockingQueue<ConsumerRecord<String, byte[]>>>();
/* Mapping {topic -> [IKafkaMessageListener]} */
private Multimap<String, IKafkaMessageListener> topicMsgListeners = HashMultimap.create();
/* Mapping {topic -> KafkaMsgConsumerWorker} */
private ConcurrentMap<String, KafkaMsgConsumerWorker> topicWorkers = new ConcurrentHashMap<String, KafkaMsgConsumerWorker>();
private String bootstrapServers;
private KafkaConsumer<?, ?> metadataConsumer;
private Properties consumerProperties;
private ExecutorService executorService;
private boolean myOwnExecutorService = true;
/**
* Constructs a new {@link KafkaMsgConsumer} object.
*
* @since 1.3.0
*/
public KafkaMsgConsumer(String bootstrapServers, String consumerGroupId,
KafkaConsumer<?, ?> metadataConsumer) {
this.bootstrapServers = bootstrapServers;
this.consumerGroupId = consumerGroupId;
setMetadataConsumer(metadataConsumer);
}
/**
* Constructs a new {@link KafkaMsgConsumer} object.
*/
public KafkaMsgConsumer(String bootstrapServers, String consumerGroupId,
boolean consumeFromBeginning) {
this.bootstrapServers = bootstrapServers;
this.consumerGroupId = consumerGroupId;
this.consumeFromBeginning = consumeFromBeginning;
}
/**
* Constructs a new {@link KafkaMsgConsumer} object.
*
* @since 1.3.0
*/
public KafkaMsgConsumer(String bootstrapServers, String consumerGroupId,
boolean consumeFromBeginning, KafkaConsumer<?, ?> metadataConsumer) {
this.bootstrapServers = bootstrapServers;
this.consumerGroupId = consumerGroupId;
this.consumeFromBeginning = consumeFromBeginning;
setMetadataConsumer(metadataConsumer);
}
/**
*
* @return
* @since 1.3.0
*/
public KafkaConsumer<?, ?> getMetadataConsumer() {
return metadataConsumer;
}
/**
*
* @param metadataConsumer
* @return
* @since 1.3.0
*/
public KafkaMsgConsumer setMetadataConsumer(KafkaConsumer<?, ?> metadataConsumer) {
this.metadataConsumer = metadataConsumer;
return this;
}
/**
* Each Kafka consumer is associated with a consumer-group-id.
*
* <p>
* If two or more consumers share the same group-id and consume messages from
* the same topic, messages are consumed queue-style: each message is
* delivered to exactly one consumer, and which consumer receives which
* message is undetermined.
* </p>
*
* <p>
* If two or more consumers have different group-ids and consume messages
* from the same topic, messages are consumed publish-subscribe-style: each
* message is delivered to all consumers.
* </p>
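*
* <p>
* Illustrative sketch (server address and group-ids are placeholders):
* </p>
*
* <pre>{@code
* // same group-id: each message is delivered to only one of c1, c2
* KafkaMsgConsumer c1 = new KafkaMsgConsumer("localhost:9092", "group-A", false);
* KafkaMsgConsumer c2 = new KafkaMsgConsumer("localhost:9092", "group-A", false);
* // different group-id: c3 additionally receives every message of the topic
* KafkaMsgConsumer c3 = new KafkaMsgConsumer("localhost:9092", "group-B", false);
* }</pre>
*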
* @return
*/
public String getConsumerGroupId() {
return consumerGroupId;
}
/**
* See {@link #getConsumerGroupId()}.
*
* @param consumerGroupId
* @return
*/
public KafkaMsgConsumer setConsumerGroupId(String consumerGroupId) {
this.consumerGroupId = consumerGroupId;
return this;
}
/**
* Consume messages from the beginning? See the {@code auto.offset.reset}
* option at http://kafka.apache.org/08/configuration.html.
*
* @return
*/
public boolean isConsumeFromBeginning() {
return consumeFromBeginning;
}
/**
* Alias of {@link #isConsumeFromBeginning()}.
*
* @return
*/
public boolean getConsumeFromBeginning() {
return consumeFromBeginning;
}
/**
* Consume messages from the beginning? See the {@code auto.offset.reset}
* option at http://kafka.apache.org/08/configuration.html.
*
* @param consumeFromBeginning
* @return
*/
public KafkaMsgConsumer setConsumeFromBeginning(boolean consumeFromBeginning) {
this.consumeFromBeginning = consumeFromBeginning;
return this;
}
/**
* Gets custom consumer configuration properties.
*
* @return
* @since 1.2.1
*/
public Properties getConsumerProperties() {
return consumerProperties;
}
/**
* Sets custom consumer configuration properties.
*
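* <p>
* Sketch, given a {@link KafkaMsgConsumer} {@code consumer} (illustrative;
* {@code max.poll.records} is a standard Kafka consumer setting, and any
* property supported by the underlying consumer may be used):
* </p>
*
* <pre>{@code
* Properties props = new Properties();
* props.put("max.poll.records", "100");
* consumer.setConsumerProperties(props);
* }</pre>
*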
* @param props
* @return
* @since 1.2.1
*/
public KafkaMsgConsumer setConsumerProperties(Properties props) {
if (props == null) {
consumerProperties = null;
} else {
consumerProperties = new Properties();
consumerProperties.putAll(props);
}
return this;
}
/**
* Initializing method.
*/
public void init() {
if (executorService == null) {
int numThreads = Math.min(Math.max(Runtime.getRuntime().availableProcessors(), 1), 4);
executorService = Executors.newFixedThreadPool(numThreads);
myOwnExecutorService = true;
} else {
myOwnExecutorService = false;
}
}
/**
* Destroying method.
*/
public void destroy() {
// stop all workers
for (KafkaMsgConsumerWorker worker : topicWorkers.values()) {
try {
worker.stopWorker();
} catch (Exception e) {
LOGGER.warn(e.getMessage(), e);
}
}
topicWorkers.clear();
// clear all message listeners
topicMsgListeners.clear();
// close all KafkaConsumer
for (KafkaConsumer<String, byte[]> consumer : topicConsumers.values()) {
try {
consumer.commitSync();
consumer.close();
} catch (Exception e) {
LOGGER.warn(e.getMessage(), e);
}
}
topicConsumers.clear();
if (executorService != null && myOwnExecutorService) {
try {
executorService.shutdownNow();
} catch (Exception e) {
LOGGER.warn(e.getMessage(), e);
} finally {
executorService = null;
}
}
}
private Map<String, List<PartitionInfo>> topicInfo = null;
private long lastTopicInfoFetched = 0;
private Map<String, List<PartitionInfo>> getTopicInfo() {
if (topicInfo == null || lastTopicInfoFetched + 1000 < System.currentTimeMillis()) {
synchronized (metadataConsumer) {
topicInfo = metadataConsumer.listTopics();
}
lastTopicInfoFetched = System.currentTimeMillis();
}
return topicInfo;
}
/**
* Checks if a Kafka topic exists.
*
* @param topicName
* @return
* @since 1.2.0
*/
public boolean topicExists(String topicName) {
Map<String, List<PartitionInfo>> topicInfo = getTopicInfo();
return topicInfo != null && topicInfo.containsKey(topicName);
}
/**
* Gets number of partitions of a topic.
*
* @param topicName
* @return topic's number of partitions, or {@code 0} if the topic does not
* exist
* @since 1.2.0
*/
public int getNumPartitions(String topicName) {
Map<String, List<PartitionInfo>> topicInfo = getTopicInfo();
List<PartitionInfo> partitionInfo = topicInfo != null ? topicInfo.get(topicName) : null;
return partitionInfo != null ? partitionInfo.size() : 0;
}
/**
* Gets partition information of a topic.
*
* @param topicName
* @return list of {@link PartitionInfo} or {@code null} if topic does not
* exist.
* @since 1.3.0
*/
public List<PartitionInfo> getPartitionInfo(String topicName) {
Map<String, List<PartitionInfo>> topicInfo = getTopicInfo();
List<PartitionInfo> partitionInfo = topicInfo != null ? topicInfo.get(topicName) : null;
return partitionInfo != null ? Collections.unmodifiableList(partitionInfo) : null;
}
/**
* Gets all available topics.
*
* @return
* @since 1.3.0
*/
@SuppressWarnings("unchecked")
public Set<String> getTopics() {
Map<String, List<PartitionInfo>> topicInfo = getTopicInfo();
Set<String> topics = topicInfo != null ? topicInfo.keySet() : null;
return topics != null ? Collections.unmodifiableSet(topics) : Collections.EMPTY_SET;
}
/**
* Sets an {@link ExecutorService} to be used for async tasks.
*
* @param executorService
* @return
* @since 1.3.1
*/
public KafkaMsgConsumer setExecutorService(ExecutorService executorService) {
if (this.executorService != null) {
this.executorService.shutdown();
}
this.executorService = executorService;
myOwnExecutorService = false;
return this;
}
/**
* Seeks to the beginning of all partitions of a topic.
*
* @param topic
* @since 1.2.0
*/
public void seekToBeginning(String topic) {
KafkaConsumer<?, ?> consumer = _getConsumer(topic, true, true);
KafkaHelper.seekToBeginning(consumer, topic);
}
/**
* Seeks to the end of all partitions of a topic.
*
* @param topic
* @since 1.2.0
*/
public void seekToEnd(String topic) {
KafkaConsumer<?, ?> consumer = _getConsumer(topic, true, true);
KafkaHelper.seekToEnd(consumer, topic);
}
/**
* Prepares a consumer to consume messages from a Kafka topic.
*
* @param topic
* @param autoCommitOffset
* @param leaderAutoRebalance
* @since 1.2.0
*/
private KafkaConsumer<String, byte[]> _getConsumer(String topic, boolean autoCommitOffset,
boolean leaderAutoRebalance) {
KafkaConsumer<String, byte[]> consumer = topicConsumers.get(topic);
if (consumer == null) {
consumer = KafkaHelper.createKafkaConsumer(bootstrapServers, consumerGroupId,
consumeFromBeginning, autoCommitOffset, leaderAutoRebalance,
consumerProperties);
KafkaConsumer<String, byte[]> existingConsumer = topicConsumers.putIfAbsent(topic,
consumer);
if (existingConsumer != null) {
consumer.close();
consumer = existingConsumer;
}
}
return consumer;
}
/**
* Gets a buffer to store consumed messages from a Kafka topic.
*
* @param topic
* @return
* @since 1.2.0
*/
private BlockingQueue<ConsumerRecord<String, byte[]>> _getBuffer(String topic) {
BlockingQueue<ConsumerRecord<String, byte[]>> buffer = topicBuffers.get(topic);
if (buffer == null) {
buffer = new LinkedBlockingQueue<ConsumerRecord<String, byte[]>>();
BlockingQueue<ConsumerRecord<String, byte[]>> existingBuffer = topicBuffers
.putIfAbsent(topic, buffer);
if (existingBuffer != null) {
buffer = existingBuffer;
}
}
return buffer;
}
/**
* Prepares a worker to consume messages from a Kafka topic.
*
* @param topic
* @param autoCommitOffset
* @return
*/
private KafkaMsgConsumerWorker _getWorker(String topic, boolean autoCommitOffset) {
KafkaMsgConsumerWorker worker = topicWorkers.get(topic);
if (worker == null) {
Collection<IKafkaMessageListener> msgListeners = topicMsgListeners.get(topic);
worker = new KafkaMsgConsumerWorker(this, topic, msgListeners, executorService);
KafkaMsgConsumerWorker existingWorker = topicWorkers.putIfAbsent(topic, worker);
if (existingWorker != null) {
worker = existingWorker;
} else {
worker.start();
}
}
return worker;
}
/**
* Adds a message listener to a topic.
*
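* <p>
* Sketch, given an initialized {@link KafkaMsgConsumer} {@code consumer}
* (assumes {@link IKafkaMessageListener} is a single-method callback
* interface receiving a {@link KafkaMessage}; the topic name is a
* placeholder):
* </p>
*
* <pre>{@code
* IKafkaMessageListener listener = msg -> System.out.println("Received: " + msg);
* boolean added = consumer.addMessageListener("my-topic", listener);
* }</pre>
*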
* @param topic
* @param messageListener
* @return {@code true} if successful, {@code false} otherwise (the listener
* may have been added already)
*/
public boolean addMessageListener(String topic, IKafkaMessageListener messageListener) {
return addMessageListener(topic, messageListener, true);
}
/**
* Adds a message listener to a topic.
*
* @param topic
* @param messageListener
* @param autoCommitOffset
* @return {@code true} if successful, {@code false} otherwise (the listener
* may have been added already)
*/
public boolean addMessageListener(String topic, IKafkaMessageListener messageListener,
boolean autoCommitOffset) {
synchronized (topicMsgListeners) {
if (topicMsgListeners.put(topic, messageListener)) {
_getWorker(topic, autoCommitOffset);
return true;
}
}
return false;
}
/**
* Removes a topic message listener.
*
* @param topic
* @param msgListener
* @return {@code true} if successful, {@code false} otherwise (the topic
* may have had no such listener added before)
*/
public boolean removeMessageListener(String topic, IKafkaMessageListener msgListener) {
synchronized (topicMsgListeners) {
if (topicMsgListeners.remove(topic, msgListener)) {
if (topicMsgListeners.get(topic).isEmpty()) {
// no more listener, stop worker
KafkaMsgConsumerWorker worker = topicWorkers.remove(topic);
if (worker != null) {
worker.stopWorker();
}
}
return true;
}
}
return false;
}
/**
* Consumes one message from a topic.
*
* @param topic
* @return the consumed message or {@code null} if no message available
*/
public KafkaMessage consume(final String topic) {
return consume(topic, 1000, TimeUnit.MILLISECONDS);
}
/**
* Fetches messages from Kafka and puts them into the buffer.
*
* @param buffer
* @param topic
* @param waitTime
* @param waitTimeUnit
*/
private void _fetch(BlockingQueue<ConsumerRecord<String, byte[]>> buffer, String topic,
long waitTime, TimeUnit waitTimeUnit) {
KafkaConsumer<String, byte[]> consumer = _getConsumer(topic, true, true);
synchronized (consumer) {
Set<String> subscription = consumer.subscription();
if (subscription == null || subscription.size() == 0) {
// this consumer has not subscribed to any topic yet
// if (topicExists(topic)) {
List<String> topics = Arrays.asList(topic);
consumer.subscribe(topics);
// }
}
try {
consumer.commitSync();
} catch (CommitFailedException e) {
LOGGER.warn(e.getMessage(), e);
}
subscription = consumer.subscription();
ConsumerRecords<String, byte[]> crList = subscription != null && subscription.size() > 0
? consumer.poll(waitTimeUnit.toMillis(waitTime)) : null;
if (crList != null) {
for (ConsumerRecord<String, byte[]> cr : crList) {
buffer.offer(cr);
}
}
}
}
/**
* Consumes one message from a topic, waiting up to the specified wait-time.
*
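* <p>
* Sketch, given an initialized {@link KafkaMsgConsumer} {@code consumer}
* (topic name and wait-time are placeholders):
* </p>
*
* <pre>{@code
* KafkaMessage msg = consumer.consume("my-topic", 10, TimeUnit.SECONDS);
* if (msg == null) {
*     // no message arrived within 10 seconds
* }
* }</pre>
*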
* @param topic
* @param waitTime
* @param waitTimeUnit
* @return the consumed message or {@code null} if no message available
*/
public KafkaMessage consume(String topic, long waitTime, TimeUnit waitTimeUnit) {
BlockingQueue<ConsumerRecord<String, byte[]>> buffer = _getBuffer(topic);
ConsumerRecord<String, byte[]> cr = buffer.poll();
if (cr == null) {
_fetch(buffer, topic, waitTime, waitTimeUnit);
cr = buffer.poll();
}
return cr != null ? new KafkaMessage(cr) : null;
}
}