
org.reactivecommons.async.kafka.communications.ReactiveMessageSender Maven / Gradle / Ivy
Go to download
Show more of this group Show more artifacts with this name
Show all versions of async-kafka Show documentation
Abstract your broker with semantic async messages
package org.reactivecommons.async.kafka.communications;
import lombok.SneakyThrows;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.internals.RecordHeader;
import org.reactivecommons.async.commons.converters.MessageConverter;
import org.reactivecommons.async.kafka.KafkaMessage;
import org.reactivecommons.async.kafka.communications.topology.TopologyCreator;
import reactor.core.publisher.Flux;
import reactor.core.publisher.FluxSink;
import reactor.core.publisher.Mono;
import reactor.core.publisher.MonoSink;
import reactor.kafka.sender.KafkaSender;
import reactor.kafka.sender.SenderRecord;
import reactor.kafka.sender.SenderResult;
import java.nio.charset.StandardCharsets;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicLong;
import java.util.stream.Collectors;
public class ReactiveMessageSender {
private final ConcurrentHashMap> confirmations = new ConcurrentHashMap<>();
private final CopyOnWriteArrayList>> fluxSinks = new CopyOnWriteArrayList<>();
private final AtomicLong counter = new AtomicLong();
private final ExecutorService executorServiceConfirm = Executors.newFixedThreadPool(13, r -> new Thread(r, "KMessageSender1-" + counter.getAndIncrement()));
private final ExecutorService executorServiceEmit = Executors.newFixedThreadPool(13, r -> new Thread(r, "KMessageSender2-" + counter.getAndIncrement()));
private final int senderCount = 4;
private final MessageConverter messageConverter;
private final TopologyCreator topologyCreator;
public ReactiveMessageSender(KafkaSender sender, MessageConverter messageConverter,
TopologyCreator topologyCreator) {
this.messageConverter = messageConverter;
this.topologyCreator = topologyCreator;
for (int i = 0; i < senderCount; ++i) {
Flux> source = Flux.create(fluxSinks::add);
sender.send(source)
.doOnNext(this::confirm)
.subscribe();
}
}
public Mono send(V message) {
return Mono.create(sink -> {
SenderRecord record = createRecord(message);
confirmations.put(record.key(), sink);
executorServiceEmit.submit(() -> fluxSinks.get((int) (System.currentTimeMillis() % senderCount)).next(record));
});
}
private void confirm(SenderResult result) {
executorServiceConfirm.submit(() -> {
MonoSink sink = confirmations.remove(result.correlationMetadata());
if (sink != null) {
if (result.exception() != null) {
sink.error(result.exception());
} else {
sink.success();
}
}
});
}
private SenderRecord createRecord(V object) {
KafkaMessage message = (KafkaMessage) messageConverter.toMessage(object);
ProducerRecord record = createProducerRecord(message);
return SenderRecord.create(record, message.getProperties().getKey()); // TODO: Review for Request-Reply
}
@SneakyThrows
private ProducerRecord createProducerRecord(KafkaMessage message) {
topologyCreator.checkTopic(message.getProperties().getTopic());
List headers = message.getProperties().getHeaders().entrySet().stream()
.map(entry -> new RecordHeader(entry.getKey(), entry.getValue()
.toString().getBytes(StandardCharsets.UTF_8)))
.collect(Collectors.toList());
return new ProducerRecord<>(message.getProperties().getTopic(), null,
message.getProperties().getKey(), message.getBody(), headers);
}
}
© 2015 - 2025 Weber Informatics LLC | Privacy Policy