net.pincette.jes.util.Kafka
Utilities to work with pincette-jes
package net.pincette.jes.util;
import static java.util.Collections.unmodifiableMap;
import static java.util.stream.Collectors.toMap;
import static net.pincette.jes.util.JsonFields.CORR;
import static net.pincette.util.Collections.map;
import static net.pincette.util.Collections.merge;
import static net.pincette.util.Pair.pair;
import static org.apache.kafka.streams.kstream.JoinWindows.of;
import com.typesafe.config.Config;
import java.time.Duration;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import javax.json.JsonObject;
import net.pincette.util.Pair;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.Serializer;
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.kstream.KStream;
/**
* Some Kafka utilities.
*
* @author Werner Donné
* @since 1.0
*/
public class Kafka {
private static final Map<String, Object> RELIABLE_PRODUCER_CONFIG =
unmodifiableMap(
map(
pair("acks", "all"),
pair("enable.idempotence", true),
pair("request.timeout.ms", 5000),
// Lower than replica.lag.time.max.ms, so that there are few retries, because retries
// may generate a lot of duplicates.
pair("max.in.flight.requests.per.connection", 1)));
private Kafka() {}
/**
* Joins two streams on the <code>_corr</code> field. The result is keyed on that field, with the
* values of both streams paired.
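*
* <p>A minimal usage sketch (the topic names and the {@code StreamsBuilder} wiring are
* hypothetical, and JSON serdes are assumed to be configured as the defaults):
*
* <pre>{@code
* final StreamsBuilder builder = new StreamsBuilder();
* final KStream<String, JsonObject> commands = builder.stream("commands");
* final KStream<String, JsonObject> replies = builder.stream("replies");
*
* correlate(commands, replies, Duration.ofSeconds(30))
*     .foreach((corr, pair) -> System.out.println(corr + ": " + pair));
* }</pre>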
*
* @param stream1 the first stream.
* @param stream2 the second stream.
* @param window the join window.
* @return The joined stream of pairs.
* @since 1.1.4
*/
public static KStream<String, Pair<JsonObject, JsonObject>> correlate(
final KStream<String, JsonObject> stream1,
final KStream<String, JsonObject> stream2,
final Duration window) {
return toCorr(stream1).join(toCorr(stream2), Pair::pair, of(window));
}
/**
* This creates a fail-fast Kafka producer that demands full acknowledgement of sent messages.
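*
* <p>A minimal usage sketch; the bootstrap server address and the choice of string serializers
* are hypothetical:
*
* <pre>{@code
* final Map<String, Object> config = Map.of("bootstrap.servers", "localhost:9092");
* final KafkaProducer<String, String> producer =
*     createReliableProducer(config, new StringSerializer(), new StringSerializer());
* }</pre>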
*
* @param config the Kafka configuration, which will be merged with a reliable built-in
* configuration.
* @param keySerializer the serializer for keys.
* @param valueSerializer the serializer for values.
* @param <K> the key type.
* @param <V> the value type.
* @return The Kafka producer.
* @since 1.0
*/
public static <K, V> KafkaProducer<K, V> createReliableProducer(
final Map<String, Object> config,
final Serializer<K> keySerializer,
final Serializer<V> valueSerializer) {
return new KafkaProducer<>(
merge(config, RELIABLE_PRODUCER_CONFIG), keySerializer, valueSerializer);
}
/**
* Gets the configuration object at <code>path</code> in <code>config</code> and flattens the tree
* under it, so that the keys in the resulting map are dot-separated paths, as Kafka expects them.
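*
* <p>A sketch of how this could be used; the <code>"kafka"</code> path and the configuration
* values are hypothetical. Given an <code>application.conf</code> such as
*
* <pre>
* kafka {
*   bootstrap.servers = "localhost:9092"
*   client.id = "my-app"
* }
* </pre>
*
* the call
*
* <pre>{@code
* final Map<String, Object> kafkaConfig = fromConfig(ConfigFactory.load(), "kafka");
* }</pre>
*
* yields a map with the entries <code>bootstrap.servers</code> and <code>client.id</code>.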
*
* @param config the given configuration object.
* @param path the dot-separated path within the configuration object.
* @return The Kafka configuration.
* @since 1.0
*/
public static Map<String, Object> fromConfig(final Config config, final String path) {
return config.getConfig(path).entrySet().stream()
.collect(toMap(Map.Entry::getKey, e -> e.getValue().unwrapped()));
}
/**
* Sends a message to Kafka asynchronously.
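*
* <p>A minimal usage sketch; the producer, topic name and record contents are hypothetical.
* Failures complete the returned stage exceptionally:
*
* <pre>{@code
* send(producer, new ProducerRecord<>("events", "key-1", "value-1"))
*     .thenAccept(result -> System.out.println("Sent: " + result));
* }</pre>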
*
* @param producer the used producer.
* @param record the record to be sent.
* @param <K> the key type.
* @param <V> the value type.
* @return <code>true</code> if the request was successful, <code>false</code> otherwise.
* @since 1.0
*/
public static <K, V> CompletionStage<Boolean> send(
final KafkaProducer<K, V> producer, final ProducerRecord<K, V> record) {
final CompletableFuture<Boolean> completableFuture = new CompletableFuture<>();
producer.send(
record,
(metadata, exception) -> {
if (exception != null) {
completableFuture.completeExceptionally(exception);
} else {
completableFuture.complete(true);
}
});
return completableFuture;
}
private static KStream<String, JsonObject> toCorr(final KStream<String, JsonObject> stream) {
return stream
.filter((k, v) -> v.containsKey(CORR))
.map((k, v) -> new KeyValue<>(v.getString(CORR).toLowerCase(), v));
}
}