com.github.shoothzj.kafka.consumer.ConsumerDemo

package com.github.shoothzj.kafka.consumer;

import com.github.shoothzj.kafka.KafkaConstant;
import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.WakeupException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.time.Duration;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.function.Consumer;
import java.util.regex.Pattern;

import static java.util.stream.Collectors.joining;

public class ConsumerDemo {

    private static final Logger log = LoggerFactory.getLogger(ConsumerDemo.class);

    public KafkaConsumer<String, String> createConsumer() {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG,
                KafkaConstant.kafkaIpList.stream().map(s -> s + ":9092").collect(joining(",")));
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "ShootHzj");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, KafkaConstant.defaultDeserializer);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, KafkaConstant.defaultDeserializer);
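        // Added note (not in the original demo): the manual-commit examples below
        // assume automatic offset commits are disabled; otherwise the client also
        // commits on its own every auto.commit.interval.ms.
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");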

        return new KafkaConsumer<>(props);
    }

    /**
     * Subscribe to topics, either by explicit name or by regex pattern.
     */
    public void subscribeExample() {
        KafkaConsumer<String, String> consumer = createConsumer();
        // subscribe to a single topic by name
        consumer.subscribe(Collections.singletonList("Topic"));
        // or subscribe to every topic matching a regex; this replaces the previous subscription
        consumer.subscribe(Pattern.compile("test.*"));
    }

    /**
     * simple consumer example
     * You can't have multiple consumers that belong to the same group in one thread,
     * and you can't have multiple threads safely share the same consumer.
     * See:
     * https://stackoverflow.com/questions/51231381/why-multi-kafka-consumer-object-makes-it-always-rebalancing-and-not-work
     */
    public void consumeExample() {
        KafkaConsumer<String, String> consumer = createConsumer();
        try {
            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
                records.forEach(record -> {
                    log.debug("topic = {}, partition = {}, offset = {}, key = {}, value = {}",
                            record.topic(), record.partition(), record.offset(),
                            record.key(), record.value());
                });
            }
        } finally {
            consumer.close();
        }
    }
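
    /**
     * Added sketch, not part of the original demo: wakeup() is the one
     * thread-safe method on KafkaConsumer, so a clean shutdown from another
     * thread (here a JVM shutdown hook) calls consumer.wakeup(), which makes
     * the blocked poll() throw WakeupException in the polling thread.
     */
    public void consumeWithShutdownHook() {
        KafkaConsumer<String, String> consumer = createConsumer();
        Thread pollThread = Thread.currentThread();
        Runtime.getRuntime().addShutdownHook(new Thread(() -> {
            consumer.wakeup();
            try {
                // wait for the poll loop to finish closing the consumer
                pollThread.join();
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        }));
        consumer.subscribe(Collections.singletonList("Topic"));
        try {
            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
                records.forEach(record -> log.debug("offset = {}", record.offset()));
            }
        } catch (WakeupException e) {
            // expected on shutdown; fall through to close
        } finally {
            consumer.close();
        }
    }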

    /**
     * Synchronously commit offsets after processing each polled batch.
     */
    public void consumeSyncCommit() {
        KafkaConsumer<String, String> consumer = createConsumer();
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
            records.forEach(record -> {
                log.debug("topic = {}, partition = {}, offset = {}, key = {}, value = {}",
                        record.topic(), record.partition(), record.offset(),
                        record.key(), record.value());
            });
            try {
                consumer.commitSync();
            } catch (Exception e) {
                log.error("commit failed", e);
            }
        }
    }

    public void consumeAsyncCommit() {
        KafkaConsumer<String, String> consumer = createConsumer();
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
            records.forEach(record -> {
                log.debug("topic = {}, partition = {}, offset = {}, key = {}, value = {}",
                        record.topic(), record.partition(), record.offset(),
                        record.key(), record.value());
            });
            // commitAsync does not retry failed commits
            consumer.commitAsync();
        }
    }

    public void consumeAsyncCommitWithCallback() {
        KafkaConsumer<String, String> consumer = createConsumer();
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
            records.forEach(record -> {
                log.debug("topic = {}, partition = {}, offset = {}, key = {}, value = {}",
                        record.topic(), record.partition(), record.offset(),
                        record.key(), record.value());
            });
            // commitAsync does not retry failed commits, but the callback sees the error
            consumer.commitAsync((offsets, exception) -> {
                if (exception != null) {
                    log.error("Commit failed for offsets {}", offsets, exception);
                }
            });
        }
    }
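
    /**
     * Added sketch, not part of the original demo: a common pattern combines
     * both commit styles: the cheap, non-retrying commitAsync() on every loop
     * iteration, then one retrying commitSync() before closing, so the final
     * offsets land even if the last async commit failed.
     */
    public void consumeCombinedCommit() {
        KafkaConsumer<String, String> consumer = createConsumer();
        try {
            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
                records.forEach(record -> log.debug("offset = {}", record.offset()));
                consumer.commitAsync();
            }
        } catch (Exception e) {
            log.error("Unexpected error", e);
        } finally {
            try {
                consumer.commitSync();
            } finally {
                consumer.close();
            }
        }
    }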

    /**
     * Commit the offsets processed so far when partitions are revoked in a rebalance.
     */
    public void onPartitionRevoked() {
        Map<TopicPartition, OffsetAndMetadata> currentOffsets = new HashMap<>();
        List<String> topics = new ArrayList<>();
        KafkaConsumer<String, String> consumer = createConsumer();
        try {
            consumer.subscribe(topics, new ConsumerRebalanceListener() {
                @Override
                public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
                    // commit what we have processed before losing ownership of the partitions
                    log.info("Lost partitions in rebalance. Committing offsets: {}", currentOffsets);
                    consumer.commitSync(currentOffsets);
                }

                @Override
                public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
                }
            });
            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
                records.forEach(record -> {
                    log.debug("topic = {}, partition = {}, offset = {}, key = {}, value = {}",
                            record.topic(), record.partition(), record.offset(),
                            record.key(), record.value());
                    // remember the next offset to consume for this partition
                    currentOffsets.put(new TopicPartition(record.topic(), record.partition()),
                            new OffsetAndMetadata(record.offset() + 1, "no metadata"));
                });
                consumer.commitAsync(currentOffsets, null);
            }
        } catch (WakeupException e) {
            // expected when wakeup() is called to shut down; proceed to close the consumer
        } catch (Exception e) {
            log.error("Unexpected error", e);
        } finally {
            try {
                consumer.commitSync(currentOffsets);
            } finally {
                consumer.close();
                log.info("Close consumer and we are done");
            }
        }
    }

    /**
     * On reassignment, seek each assigned partition to the offset stored in an external DB.
     */
    public void onReassignSeek() {
        KafkaConsumer<String, String> consumer = createConsumer();
        List<String> topics = new ArrayList<>();
        consumer.subscribe(topics, new ConsumerRebalanceListener() {
            @Override
            public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
                commitDBTransaction();
            }

            @Override
            public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
                for (TopicPartition partition: partitions) {
                    consumer.seek(partition, getOffsetFromDB(partition));
                }
            }
        });
        // poll once to make sure we join a consumer group and get assigned partitions
        consumer.poll(Duration.ofMillis(0));

        for (TopicPartition partition: consumer.assignment()) {
            consumer.seek(partition, getOffsetFromDB(partition));
        }

        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
            records.forEach(new Consumer<ConsumerRecord<String, String>>() {
                @Override
                public void accept(ConsumerRecord<String, String> record) {
                    processRecord(record);
                    storeRecordInDB(record);
                    storeOffsetInDB(record.topic(), record.partition(), record.offset());
                }

                private void storeOffsetInDB(String topic, int partition, long offset) {
                }

                private void storeRecordInDB(ConsumerRecord<String, String> record) {
                }

                private void processRecord(ConsumerRecord<String, String> record) {
                }
            });
            commitDBTransaction();
        }
    }

    private void commitDBTransaction() {
    }

    private long getOffsetFromDB(TopicPartition partition) {
        return 0;
    }

    /**
     * Manually assign all partitions of a topic (standalone consumer, no group rebalancing).
     */
    public void assignPartitions() {
        KafkaConsumer<String, String> consumer = createConsumer();
        List<PartitionInfo> partitionInfos = consumer.partitionsFor("topic");
        Set<TopicPartition> set = new HashSet<>();
        if (partitionInfos != null) {
            for (PartitionInfo partitionInfo: partitionInfos) {
                set.add(new TopicPartition(partitionInfo.topic(), partitionInfo.partition()));
            }
            consumer.assign(set);

            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
                records.forEach(record -> {
                    log.debug("topic = {}, partition = {}, offset = {}, key = {}, value = {}",
                            record.topic(), record.partition(), record.offset(),
                            record.key(), record.value());
                });
                consumer.commitSync();
            }
        }
    }
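
    /**
     * Added sketch, not part of the original demo: with manual assignment you
     * can also start from a point in time. offsetsForTimes() maps each
     * partition to the earliest offset whose record timestamp is >= the given
     * epoch-millis value; a null entry means no such record exists.
     */
    public void assignAndSeekToTimestamp(long timestampMs) {
        KafkaConsumer<String, String> consumer = createConsumer();
        List<PartitionInfo> partitionInfos = consumer.partitionsFor("topic");
        if (partitionInfos == null) {
            return;
        }
        Map<TopicPartition, Long> timestamps = new HashMap<>();
        for (PartitionInfo partitionInfo : partitionInfos) {
            timestamps.put(new TopicPartition(partitionInfo.topic(), partitionInfo.partition()), timestampMs);
        }
        consumer.assign(timestamps.keySet());
        consumer.offsetsForTimes(timestamps).forEach((partition, offsetAndTimestamp) -> {
            if (offsetAndTimestamp != null) {
                consumer.seek(partition, offsetAndTimestamp.offset());
            }
        });
    }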

}



