com.epam.eco.commons.kafka.helpers.TopicOffsetFetcher Maven / Gradle / Ivy


A library of utilities, helpers and higher-level APIs for the Kafka client library

/*
 * Copyright 2019 EPAM Systems
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License.  You may obtain a copy
 * of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */
package com.epam.eco.commons.kafka.helpers;

import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;

import org.apache.commons.lang3.Validate;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;

import com.epam.eco.commons.kafka.KafkaUtils;
import com.epam.eco.commons.kafka.OffsetRange;
import com.epam.eco.commons.kafka.TopicPartitionComparator;
import com.epam.eco.commons.kafka.config.ConsumerConfigBuilder;

/**
 * @author Andrei_Tytsik
 */
public class TopicOffsetFetcher {

    private final Map<String, Object> consumerConfig;

    private TopicOffsetFetcher(String bootstrapServers, Map<String, Object> consumerConfig) {
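        // Merge any caller-supplied properties with the minimum required
        // consumer configs, disable auto-commit and assign a random client id
        // so that concurrent fetcher instances don't collide.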
        this.consumerConfig = ConsumerConfigBuilder.
                with(consumerConfig).
                bootstrapServers(bootstrapServers).
                minRequiredConfigs().
                enableAutoCommitDisabled().
                clientIdRandom().
                build();
    }

    public static TopicOffsetFetcher with(Map<String, Object> consumerConfig) {
        return new TopicOffsetFetcher(null, consumerConfig);
    }

    public static TopicOffsetFetcher with(String bootstrapServers) {
        return new TopicOffsetFetcher(bootstrapServers, null);
    }

    public Map<TopicPartition, OffsetRange> fetchForPartitions(TopicPartition... partitions) {
        return fetchForPartitions(
                partitions != null ? Arrays.asList(partitions) : null);
    }

    public Map<TopicPartition, OffsetRange> fetchForPartitions(Collection<TopicPartition> partitions) {
        Validate.notEmpty(partitions, "Collection of partitions is null or empty");
        Validate.noNullElements(partitions, "Collection of partitions contains null elements");

        try (KafkaConsumer<?, ?> consumer = new KafkaConsumer<>(consumerConfig)) {
            return doFetch(consumer, partitions);
        }
    }

    public Map<TopicPartition, OffsetRange> fetchForTopics(String... topicNames) {
        return fetchForTopics(
                topicNames != null ? Arrays.asList(topicNames) : null);
    }

    public Map<TopicPartition, OffsetRange> fetchForTopics(Collection<String> topicNames) {
        Validate.notEmpty(topicNames, "Collection of topic names is null or empty");
        Validate.noNullElements(topicNames, "Collection of topic names contains null elements");

        try (KafkaConsumer<?, ?> consumer = new KafkaConsumer<>(consumerConfig)) {
            List<TopicPartition> partitions =
                    KafkaUtils.getTopicPartitionsAsList(consumer, topicNames);
            return doFetch(consumer, partitions);
        }
    }

    /**
     * Offset ranges may not be accurate, as Kafka doesn't guarantee consecutive offsets (there may be
     * transaction markers, which aren't consumable and look like gaps on the client side).
     */
    private static Map<TopicPartition, OffsetRange> doFetch(
            Consumer<?, ?> consumer,
            Collection<TopicPartition> partitions) {
        Map<TopicPartition, Long> beginningOffsets = consumer.beginningOffsets(partitions);
        Map<TopicPartition, Long> endOffsets = consumer.endOffsets(partitions);

        Map<TopicPartition, OffsetRange> offsets = new TreeMap<>(TopicPartitionComparator.INSTANCE);
        for (TopicPartition partition : partitions) {
            long offsetAtBeginning = beginningOffsets.get(partition);
            long offsetAtEnd = endOffsets.get(partition);
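            // endOffsets() reports the offset of the next record to be written,
            // so for a non-empty partition the largest consumable offset is
            // endOffset - 1; the trailing boolean records whether end > beginning,
            // i.e. whether the partition currently holds any records.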
            offsets.put(
                    partition,
                    new OffsetRange(
                            offsetAtBeginning,
                            offsetAtEnd > offsetAtBeginning ? offsetAtEnd - 1 : offsetAtEnd,
                            offsetAtEnd > offsetAtBeginning));
        }
        return offsets;
    }

}
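For reference, a minimal usage sketch of the class above (the broker address and topic name are illustrative placeholders, not part of the library):

Map<TopicPartition, OffsetRange> offsets =
        TopicOffsetFetcher.with("localhost:9092").fetchForTopics("orders");
offsets.forEach((partition, range) ->
        System.out.println(partition + " -> " + range));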



