package com.decathlon.tzatziki.steps;
import com.decathlon.tzatziki.kafka.KafkaInterceptor;
import com.decathlon.tzatziki.kafka.SchemaRegistry;
import com.decathlon.tzatziki.utils.Comparison;
import com.decathlon.tzatziki.utils.Guard;
import com.decathlon.tzatziki.utils.Mapper;
import com.decathlon.tzatziki.utils.MockFaster;
import io.cucumber.java.Before;
import io.cucumber.java.en.Given;
import io.cucumber.java.en.Then;
import io.cucumber.java.en.When;
import lombok.SneakyThrows;
import lombok.extern.slf4j.Slf4j;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.generic.GenericRecordBuilder;
import org.apache.kafka.clients.admin.*;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.header.Header;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import org.junit.Assert;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.config.KafkaListenerEndpointRegistry;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.support.SendResult;
import org.springframework.kafka.test.EmbeddedKafkaBroker;
import java.time.Duration;
import java.util.*;
import java.util.concurrent.ExecutionException;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import java.util.stream.StreamSupport;
import static com.decathlon.tzatziki.kafka.KafkaInterceptor.offsets;
import static com.decathlon.tzatziki.utils.Asserts.awaitUntil;
import static com.decathlon.tzatziki.utils.Asserts.awaitUntilAsserted;
import static com.decathlon.tzatziki.utils.Comparison.COMPARING_WITH;
import static com.decathlon.tzatziki.utils.Guard.GUARD;
import static com.decathlon.tzatziki.utils.Patterns.*;
import static com.decathlon.tzatziki.utils.Unchecked.unchecked;
import static java.nio.charset.StandardCharsets.UTF_8;
import static java.util.Locale.ROOT;
import static java.util.Optional.ofNullable;
import static org.assertj.core.api.Assertions.assertThat;
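/**
 * Cucumber step definitions for testing Kafka interactions: the steps publish records on an
 * in-memory {@link EmbeddedKafkaBroker}, wait until the application under test has consumed them,
 * manipulate consumer group offsets and assert topic contents. Avro schemas are served by a mocked
 * schema registry exposed through MockFaster (see {@link #schemaRegistryUrl()}).
 *
 * Illustrative feature snippet (the exact wording depends on the pattern constants THAT, GUARD, A,
 * RECORD and VARIABLE, so this is an assumption, not the canonical phrasing):
 * <pre>
 * When a json message is published on the users topic:
 *   """yml
 *   id: 1
 *   name: bob
 *   """
 * </pre>
 */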
@Slf4j
@SuppressWarnings({"SpringJavaAutowiredMembersInspection", "unchecked"})
public class KafkaSteps {
public static final String RECORD = "(json messages?|" + VARIABLE_PATTERN + ")";
private static final EmbeddedKafkaBroker embeddedKafka = new EmbeddedKafkaBroker(1, true, 1);
private static final Map<String, List<Consumer<String, ?>>> avroJacksonConsumers = new LinkedHashMap<>();
private static final Map<String, Consumer<String, GenericRecord>> avroConsumers = new LinkedHashMap<>();
private static final Map<String, Consumer<String, String>> jsonConsumers = new LinkedHashMap<>();
private static final Set<String> topicsToAutoSeek = new LinkedHashSet<>();
private static final Set<String> checkedTopics = new LinkedHashSet<>();
private static boolean isStarted;
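/**
 * Starts the embedded broker exactly once (optionally applying extra broker properties)
 * and initializes the mocked schema registry. Subsequent calls only re-initialize the registry.
 */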
public static synchronized void start() {
start(null);
}
public static synchronized void start(Map<String, String> properties) {
if (!isStarted) {
isStarted = true;
if (properties != null) {
embeddedKafka.brokerProperties(properties);
}
embeddedKafka.afterPropertiesSet();
}
SchemaRegistry.initialize();
}
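/**
 * Marks a topic as already checked, so the "consumed from the topic" step will not wait
 * for a consumer group member to be assigned to it before publishing.
 */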
public static void doNotWaitForMembersOn(String topic) {
checkedTopics.add(topic);
}
public static String bootstrapServers() {
return embeddedKafka.getBrokersAsString();
}
private final ObjectSteps objects;
@Autowired(required = false)
private KafkaTemplate<String, GenericRecord> avroKafkaTemplate;
@Autowired(required = false)
private KafkaTemplate<String, String> jsonKafkaTemplate;
@Autowired(required = false)
List<ConsumerFactory<String, ?>> avroJacksonConsumerFactories = new ArrayList<>();
@Autowired(required = false)
ConsumerFactory<String, GenericRecord> avroConsumerFactory;
@Autowired(required = false)
ConsumerFactory<String, String> jsonConsumerFactory;
@Autowired(required = false)
private KafkaListenerEndpointRegistry registry;
public KafkaSteps(ObjectSteps objects) {
this.objects = objects;
}
public static String schemaRegistryUrl() {
return MockFaster.url() + SchemaRegistry.endpoint;
}
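/**
 * Registers topics whose consumers are seeked to their end offset before each scenario,
 * so that records left over from previous scenarios are ignored.
 */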
public static void autoSeekTopics(String... topics) {
topicsToAutoSeek.addAll(Arrays.asList(topics));
}
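/**
 * Scenario setup: re-initializes the mocked schema registry and the Kafka interceptor, then seeks
 * every registered auto-seek topic to its end and records the resulting positions as offset baselines.
 */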
@Before
public void before() {
SchemaRegistry.initialize();
KafkaInterceptor.before();
topicsToAutoSeek.forEach(topic -> this.getAllConsumers(topic).forEach(consumer -> {
Map<String, List<PartitionInfo>> partitionsByTopic = consumer.listTopics();
if (partitionsByTopic.containsKey(topic)) {
List<TopicPartition> topicPartitions = partitionsByTopic.get(topic).stream()
.map(partitionInfo -> new TopicPartition(partitionInfo.topic(), partitionInfo.partition())).collect(Collectors.toList());
if (!consumer.assignment().containsAll(topicPartitions)) {
consumer.assign(topicPartitions);
consumer.commitSync();
}
consumer.seekToEnd(topicPartitions);
KafkaInterceptor.disable();
consumer.partitionsFor(topic).stream()
.map(partitionInfo -> new TopicPartition(topic, partitionInfo.partition()))
.forEach(topicPartition -> {
long position = consumer.position(topicPartition);
log.debug("setting offset of %s topic to %s".formatted(topicPartition.topic(), position));
offsets().put(topicPartition, position);
});
KafkaInterceptor.enable();
}
}));
}
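/**
 * Parses the step content as an Avro schema and stores it in the scenario context under
 * "kafka.schemas." followed by the lower-cased schema name, for use by later publishing steps.
 *
 * Illustrative step (wording is an assumption based on the pattern constants):
 * <pre>
 * Given this avro schema:
 *   """yml
 *   type: record
 *   name: user
 *   fields:
 *     - name: id
 *       type: int
 *   """
 * </pre>
 */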
@Given(THAT + GUARD + A + "avro schema:$")
public void an_avro_schema(Guard guard, Object content) {
guard.in(objects, () -> {
Map<String, Object> asMap = Mapper.read(objects.resolve(content));
String name = (String) asMap.get("name");
assertThat(name).isNotNull();
Schema schema = new Schema.Parser().parse(Mapper.toJson(asMap));
objects.add("kafka.schemas." + name.toLowerCase(ROOT), schema);
});
}
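/**
 * Publishes the given records (plain JSON messages or a previously defined type) on the given topic,
 * without waiting for the application under test to consume them.
 */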
@SneakyThrows
@When(THAT + GUARD + A + RECORD + " (?:is|are)? published on the " + VARIABLE + " topic:$")
public void we_publish_on_a_topic(Guard guard, String name, String topic, Object content) {
guard.in(objects, () -> publish(name, topic, content));
}
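/**
 * Deprecated wording kept for backwards compatibility; this step and the one below both delegate to
 * {@link #a_message_is_consumed_from_a_topic(Guard, String, boolean, String, Object)}.
 */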
@Deprecated(forRemoval = true)
@When(THAT + GUARD + A + RECORD + " (?:is|are)? received on the " + VARIABLE + " topic:$")
public void a_message_is_received_on_a_topic(Guard guard, String name, String topic, Object content) {
a_message_is_consumed_from_a_topic(guard, name, false, topic, content);
}
@Deprecated(forRemoval = true)
@When(THAT + GUARD + A_USER + "receives? " + A + VARIABLE + " on the topic " + VARIABLE + ":$")
public void we_receive_a_message_on_a_topic(Guard guard, String name, String topic, Object content) {
a_message_is_consumed_from_a_topic(guard, name, false, topic, content);
}
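/**
 * Publishes the given records and blocks until the application under test has processed them,
 * optionally only counting successful processing. Unless the topic was registered through
 * {@link #doNotWaitForMembersOn(String)}, the step first waits for at least one consumer group
 * member to be assigned to the topic.
 */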
@SneakyThrows
@When(THAT + GUARD + A + RECORD + " (?:is|are)? (successfully )?consumed from the " + VARIABLE + " topic:$")
public void a_message_is_consumed_from_a_topic(Guard guard, String name, boolean successfully, String topic, Object content) {
guard.in(objects, () -> {
KafkaInterceptor.awaitForSuccessfullOnly = successfully;
if (!checkedTopics.contains(topic)) {
try (Admin admin = Admin.create(avroConsumerFactory.getConfigurationProperties())) {
awaitUntil(() -> {
List<String> groupIds = admin.listConsumerGroups().all().get().stream().map(ConsumerGroupListing::groupId).toList();
Map<String, KafkaFuture<ConsumerGroupDescription>> groupDescriptions = admin.describeConsumerGroups(groupIds).describedGroups();
return groupIds.stream()
.anyMatch(groupId -> unchecked(() -> groupDescriptions.get(groupId).get())
.members().stream()
.anyMatch(member -> member
.assignment()
.topicPartitions()
.stream()
.anyMatch(topicPartition -> {
log.debug("groupid %s is listening on topic %s".formatted(groupId, topic));
return topicPartition.topic().equals(topic);
}))
);
});
checkedTopics.add(topic);
}
}
List<RecordMetadata> results = publish(name, topic, content)
.parallelStream()
.map(KafkaInterceptor::waitUntilProcessed)
.map(SendResult::getRecordMetadata)
.collect(Collectors.toList());
log.debug("processed {}", results);
KafkaInterceptor.awaitForSuccessfullOnly = false;
});
}
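/**
 * Waits until the committed offset of the given group id on partition 0 of the topic
 * has reached the end offset reported by the test consumers.
 */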
@SneakyThrows
@When(THAT + GUARD + "the " + VARIABLE + " group id has fully consumed the " + VARIABLE + " topic$")
public void topic_has_been_consumed_on_every_partition(Guard guard, String groupId, String topic) {
guard.in(objects, () -> awaitUntilAsserted(() -> getAllConsumers(topic).forEach(consumer -> unchecked(() -> {
try (Admin admin = Admin.create(avroConsumerFactory.getConfigurationProperties())) {
Map<TopicPartition, OffsetAndMetadata> topicPartitionOffsetAndMetadataMap = admin
.listConsumerGroupOffsets(groupId)
.partitionsToOffsetAndMetadata().get();
TopicPartition key = new TopicPartition(topic, 0);
if (topicPartitionOffsetAndMetadataMap.containsKey(key)) {
long offset = topicPartitionOffsetAndMetadataMap.get(key).offset();
consumer.endOffsets(List.of(key))
.forEach((topicPartition, endOffset) -> Assert.assertEquals((long) endOffset, offset));
} else {
throw new AssertionError("let's wait a bit more");
}
}
}))));
}
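/**
 * Forces the committed offset of the given group id on partition 0 of the topic: any active members
 * are removed from the group first so the offset can be altered, and the requested offset is shifted
 * by the interceptor's adjustment for that partition.
 */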
@Given(THAT + "the current offset of " + VARIABLE + " on the topic " + VARIABLE + " is (\\d+)$")
public void that_the_current_offset_the_groupid_on_topic_is(String groupId, String topic, long offset) throws ExecutionException, InterruptedException {
try (Admin admin = Admin.create(avroConsumerFactory.getConfigurationProperties())) {
admin.listConsumerGroupOffsets(groupId).partitionsToOffsetAndMetadata().get();
TopicPartition topicPartition = new TopicPartition(topic, 0);
Collection<MemberDescription> members = admin.describeConsumerGroups(List.of(groupId)).describedGroups().get(groupId).get().members();
if (!members.isEmpty()) {
try {
admin.removeMembersFromConsumerGroup(groupId, new RemoveMembersFromConsumerGroupOptions()).all().get();
} catch (InterruptedException e) {
// Restore interrupted state...
Thread.currentThread().interrupt();
} catch (Exception e) {
// this could happen if the consumer group emptied itself meanwhile
}
}
admin.alterConsumerGroupOffsets(groupId, Map.of(topicPartition, new OffsetAndMetadata(offset + KafkaInterceptor.adjustedOffsetFor(topicPartition))))
.partitionResult(topicPartition)
.get();
}
}
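/**
 * Polls the topic from the beginning with a consumer matching the record type and asserts that
 * the received records match the expected content, using the requested comparison mode.
 */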
@Then(THAT + GUARD + "the " + VARIABLE + " topic contains" + COMPARING_WITH + " " + A + RECORD + ":$")
public void the_topic_contains(Guard guard, String topic, Comparison comparison, String name, String content) {
guard.in(objects, () -> {
Consumer<String, ?> consumer = getConsumer(name, topic);
List<TopicPartition> topicPartitions = awaitTopicPartitions(topic, consumer);
if (!consumer.assignment().containsAll(topicPartitions)) {
consumer.assign(topicPartitions);
consumer.seekToBeginning(topicPartitions);
consumer.commitSync();
}
try {
ConsumerRecords<String, ?> records = consumer.poll(Duration.ofSeconds(1));
List