
net.mguenther.kafka.junit.provider.DefaultRecordConsumer

Provides an embedded Kafka cluster consisting of Apache ZooKeeper, Apache Kafka brokers, and Kafka Connect workers in distributed mode, along with a rich set of convenient accessors and fault injectors for interacting with the embedded Kafka cluster. Supports working against external clusters as well.

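Before the source itself, a quick usage sketch: this class is not usually instantiated directly but backs the read/observe operations of the embedded cluster. The following is a minimal sketch assuming the kafka-junit 3.x entry points (EmbeddedKafkaCluster, EmbeddedKafkaClusterConfig, SendValues, ObserveKeyValues); the topic name and values are placeholders.

import net.mguenther.kafka.junit.EmbeddedKafkaCluster;
import net.mguenther.kafka.junit.ObserveKeyValues;
import net.mguenther.kafka.junit.SendValues;

import java.util.List;

import static net.mguenther.kafka.junit.EmbeddedKafkaCluster.provisionWith;
import static net.mguenther.kafka.junit.EmbeddedKafkaClusterConfig.defaultClusterConfig;

public class DefaultRecordConsumerUsageSketch {

    public static void main(String[] args) throws Exception {
        // Provision and start an embedded cluster (ZooKeeper + a single Kafka broker).
        final EmbeddedKafkaCluster kafka = provisionWith(defaultClusterConfig());
        kafka.start();
        try {
            // Publish three String values to a placeholder topic.
            kafka.send(SendValues.to("test-topic", "a", "b", "c"));
            // Block until three values have been observed or the observation timeout
            // elapses; internally this delegates to DefaultRecordConsumer.observeValues.
            final List<String> values = kafka.observeValues(ObserveKeyValues.on("test-topic", 3));
            System.out.println(values);
        } finally {
            kafka.stop();
        }
    }
}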
package net.mguenther.kafka.junit.provider;

import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import net.mguenther.kafka.junit.KeyValue;
import net.mguenther.kafka.junit.KeyValueMetadata;
import net.mguenther.kafka.junit.ObserveKeyValues;
import net.mguenther.kafka.junit.ReadKeyValues;
import net.mguenther.kafka.junit.RecordConsumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.TimeUnit;
import java.util.function.Predicate;
import java.util.stream.Collectors;

@Slf4j
@RequiredArgsConstructor
public class DefaultRecordConsumer implements RecordConsumer {

    private final String bootstrapServers;

    @Override
    public <V> List<V> readValues(final ReadKeyValues<String, V> readRequest) throws InterruptedException {
        final List<KeyValue<String, V>> kvs = read(readRequest);
        return Collections.unmodifiableList(kvs.stream().map(KeyValue::getValue).collect(Collectors.toList()));
    }

    @Override
    public <K, V> List<KeyValue<K, V>> read(final ReadKeyValues<K, V> readRequest) throws InterruptedException {
        final List<KeyValue<K, V>> consumedRecords = new ArrayList<>();
        final KafkaConsumer<K, V> consumer = new KafkaConsumer<>(effectiveConsumerProps(readRequest.getConsumerProps()));
        final int pollIntervalMillis = 100;
        final int limit = readRequest.getLimit();
        final Predicate<K> filterOnKeys = readRequest.getFilterOnKeys();
        final Predicate<V> filterOnValues = readRequest.getFilterOnValues();
        consumer.subscribe(Collections.singletonList(readRequest.getTopic()));
        int totalPollTimeMillis = 0;
        boolean assignmentsReady = false;
        while (totalPollTimeMillis < readRequest.getMaxTotalPollTimeMillis() && continueConsuming(consumedRecords.size(), limit)) {
            final ConsumerRecords<K, V> records = consumer.poll(pollIntervalMillis);
            // Kafka exchanges partition assignments and revocations between brokers and consumers via the
            // poll mechanism. If we attempt to seek to a dedicated offset for a topic-partition for which
            // the consumer has not yet been notified of its assignment (because there was no previous call
            // to poll, for instance), the seek operation will fail. Hence, we call poll first and then check
            // whether we have to seek. If we did indeed seek, we discard the records returned by the first
            // poll and poll again. If we did not seek, we proceed as normal and process the records we
            // already obtained.
            if (!assignmentsReady) {
                assignmentsReady = true;
                if (seekIfNecessary(readRequest, consumer)) continue;
            }
            for (ConsumerRecord<K, V> record : records) {
                if (filterOnKeys.test(record.key()) && filterOnValues.test(record.value())) {
                    final KeyValue<K, V> kv = readRequest.isIncludeMetadata() ?
                            new KeyValue<>(record.key(), record.value(), record.headers(), new KeyValueMetadata(record.topic(), record.partition(), record.offset())) :
                            new KeyValue<>(record.key(), record.value(), record.headers());

                    consumedRecords.add(kv);
                }
            }
            totalPollTimeMillis += pollIntervalMillis;
        }
        consumer.commitSync();
        consumer.close();
        return Collections.unmodifiableList(consumedRecords);
    }

    private <K, V> boolean seekIfNecessary(final ReadKeyValues<K, V> readRequest, final KafkaConsumer<K, V> consumer) {
        final boolean shouldSeek = !readRequest.getSeekTo().isEmpty();
        if (shouldSeek) {
            readRequest
                    .getSeekTo()
                    .keySet()
                    .stream()
                    .map(partition -> new TopicPartition(readRequest.getTopic(), partition))
                    .peek(topicPartition -> log.info("Seeking to offset {} of topic-partition {}.", readRequest.getSeekTo().get(topicPartition.partition()), topicPartition))
                    .forEach(topicPartition -> consumer.seek(topicPartition, readRequest.getSeekTo().get(topicPartition.partition())));
        }
        return shouldSeek;
    }

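    // A non-positive limit means "no limit": in that case, reading is bounded only by
    // the maximum total poll time of the read request.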
    private static boolean continueConsuming(final int messagesConsumed, final int maxMessages) {
        return maxMessages <= 0 || messagesConsumed < maxMessages;
    }

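    // Observation repeatedly issues read requests until the expected number of records has
    // been consumed or the observation timeout elapses. Only the first read request applies
    // a partition seek; since read() commits offsets synchronously, subsequent requests
    // resume from the committed offsets of the same consumer group.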
    @Override
    public <V> List<V> observeValues(final ObserveKeyValues<String, V> observeRequest) throws InterruptedException {
        final List<V> totalConsumedRecords = new ArrayList<>(observeRequest.getExpected());
        final long startNanos = System.nanoTime();
        final ReadKeyValues<String, V> initialReadRequest = withPartitionSeekForReadValues(observeRequest);
        final ReadKeyValues<String, V> subsequentReadRequests = withoutPartitionSeekForReadValues(observeRequest);
        boolean firstRequest = true;
        while (true) {
            final List<V> consumedRecords = firstRequest ?
                    readValues(initialReadRequest) :
                    readValues(subsequentReadRequests);
            totalConsumedRecords.addAll(consumedRecords);
            if (firstRequest) firstRequest = false;
            if (totalConsumedRecords.size() >= observeRequest.getExpected()) break;
            if (System.nanoTime() > startNanos + TimeUnit.MILLISECONDS.toNanos(observeRequest.getObservationTimeMillis())) {
                final String message = String.format("Expected %s records, but consumed only %s records before " +
                                "running into the timeout (%s ms).",
                        observeRequest.getExpected(),
                        totalConsumedRecords.size(),
                        observeRequest.getObservationTimeMillis());
                throw new AssertionError(message);
            }
            Thread.sleep(Math.min(observeRequest.getObservationTimeMillis(), 100));
        }
        return Collections.unmodifiableList(totalConsumedRecords);
    }

    private <V> ReadKeyValues<String, V> withPartitionSeekForReadValues(final ObserveKeyValues<String, V> observeRequest) {
        return toReadValuesRequest(observeRequest).seekTo(observeRequest.getSeekTo()).build();
    }

    private <V> ReadKeyValues<String, V> withoutPartitionSeekForReadValues(final ObserveKeyValues<String, V> observeRequest) {
        return toReadValuesRequest(observeRequest).build();
    }

    private <V> ReadKeyValues.ReadKeyValuesBuilder<String, V> toReadValuesRequest(final ObserveKeyValues<String, V> observeRequest) {
        return ReadKeyValues.from(observeRequest.getTopic(), observeRequest.getClazzOfV())
                .withAll(observeRequest.getConsumerProps())
                .withLimit(observeRequest.getExpected())
                .withMetadata(false)
                .filterOnKeys(observeRequest.getFilterOnKeys())
                .filterOnValues(observeRequest.getFilterOnValues())
                .with(ConsumerConfig.GROUP_ID_CONFIG, observeRequest.getConsumerProps().getProperty(ConsumerConfig.GROUP_ID_CONFIG));
    }

    @Override
    public <K, V> List<KeyValue<K, V>> observe(final ObserveKeyValues<K, V> observeRequest) throws InterruptedException {
        final List<KeyValue<K, V>> totalConsumedRecords = new ArrayList<>(observeRequest.getExpected());
        final long startNanos = System.nanoTime();
        final ReadKeyValues<K, V> initialReadRequest = withPartitionSeek(observeRequest);
        final ReadKeyValues<K, V> subsequentReadRequests = withoutPartitionSeek(observeRequest);
        boolean firstRequest = true;
        while (true) {
            final List<KeyValue<K, V>> consumedRecords = firstRequest ?
                    read(initialReadRequest) :
                    read(subsequentReadRequests);
            totalConsumedRecords.addAll(consumedRecords);
            if (firstRequest) firstRequest = false;
            if (totalConsumedRecords.size() >= observeRequest.getExpected()) break;
            if (System.nanoTime() > startNanos + TimeUnit.MILLISECONDS.toNanos(observeRequest.getObservationTimeMillis())) {
                final String message = String.format("Expected %s records, but consumed only %s records before " +
                                "running into the timeout (%s ms).",
                        observeRequest.getExpected(),
                        totalConsumedRecords.size(),
                        observeRequest.getObservationTimeMillis());
                throw new AssertionError(message);
            }
            Thread.sleep(Math.min(observeRequest.getObservationTimeMillis(), 100));
        }
        return Collections.unmodifiableList(totalConsumedRecords);
    }

    private <K, V> ReadKeyValues<K, V> withPartitionSeek(final ObserveKeyValues<K, V> observeRequest) {
        return toReadKeyValuesRequest(observeRequest).seekTo(observeRequest.getSeekTo()).build();
    }

    private <K, V> ReadKeyValues<K, V> withoutPartitionSeek(final ObserveKeyValues<K, V> observeRequest) {
        return toReadKeyValuesRequest(observeRequest).build();
    }

    private <K, V> ReadKeyValues.ReadKeyValuesBuilder<K, V> toReadKeyValuesRequest(final ObserveKeyValues<K, V> observeRequest) {
        return ReadKeyValues.from(observeRequest.getTopic(), observeRequest.getClazzOfK(), observeRequest.getClazzOfV())
                .withAll(observeRequest.getConsumerProps())
                .withLimit(observeRequest.getExpected())
                .withMetadata(observeRequest.isIncludeMetadata())
                .filterOnKeys(observeRequest.getFilterOnKeys())
                .filterOnValues(observeRequest.getFilterOnValues())
                .with(ConsumerConfig.GROUP_ID_CONFIG, observeRequest.getConsumerProps().getProperty(ConsumerConfig.GROUP_ID_CONFIG));
    }

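    // Copies the user-supplied consumer properties and overrides the bootstrap servers with
    // those of the cluster this consumer is bound to, so reads always target that cluster.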
    private Properties effectiveConsumerProps(final Properties originalConsumerProps) {
        final Properties effectiveConsumerProps = new Properties();
        effectiveConsumerProps.putAll(originalConsumerProps);
        effectiveConsumerProps.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        return effectiveConsumerProps;
    }
}
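Used on its own, a DefaultRecordConsumer only needs the bootstrap servers of a reachable cluster; effectiveConsumerProps enforces them on every read. Below is a minimal sketch (broker address and topic name are placeholders) that exercises readValues with the ReadKeyValues builder options referenced above.

import net.mguenther.kafka.junit.ReadKeyValues;
import net.mguenther.kafka.junit.RecordConsumer;
import net.mguenther.kafka.junit.provider.DefaultRecordConsumer;

import java.util.List;

public class DirectConsumerSketch {

    public static void main(String[] args) throws InterruptedException {
        // Placeholder broker address; any reachable Kafka cluster works.
        final RecordConsumer recordConsumer = new DefaultRecordConsumer("localhost:9092");
        // Read up to 10 String values from a placeholder topic, skipping empty values.
        // withLimit and filterOnValues feed getLimit()/getFilterOnValues() in read() above.
        final List<String> values = recordConsumer.readValues(
                ReadKeyValues.from("test-topic")
                        .withLimit(10)
                        .filterOnValues(value -> !value.isEmpty())
                        .build());
        System.out.println(values);
    }
}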



