/*
 * Copyright 2019-2020 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.kafka.core.reactive;

import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.function.Function;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.clients.consumer.OffsetAndTimestamp;
import org.apache.kafka.common.Metric;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;

import org.springframework.util.Assert;

import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import reactor.kafka.receiver.KafkaReceiver;
import reactor.kafka.receiver.ReceiverOptions;
import reactor.kafka.receiver.ReceiverRecord;
import reactor.kafka.sender.TransactionManager;
import reactor.util.function.Tuple2;
import reactor.util.function.Tuples;

/**
 * Reactive Kafka consumer operations implementation.
 *
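 * <p>A minimal usage sketch (the bootstrap servers, group id, topic name and
 * {@code String} key/value types below are illustrative placeholders, not part
 * of this class):
 * <pre>
 * {@code
 * Map<String, Object> props = new HashMap<>();
 * props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
 * props.put(ConsumerConfig.GROUP_ID_CONFIG, "sample-group");
 * props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
 * props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
 * ReceiverOptions<String, String> options = ReceiverOptions.<String, String>create(props)
 * 		.subscription(Collections.singleton("sample-topic"));
 * ReactiveKafkaConsumerTemplate<String, String> template = new ReactiveKafkaConsumerTemplate<>(options);
 * template.receive()
 * 		.doOnNext(record -> record.receiverOffset().acknowledge())
 * 		.map(ReceiverRecord::value)
 * 		.subscribe();
 * }
 * </pre>
 *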
 * @param <K> the key type.
 * @param <V> the value type.
 *
 * @author Mark Norkin
 *
 * @since 2.3.0
 */
public class ReactiveKafkaConsumerTemplate<K, V> {

	private final KafkaReceiver<K, V> kafkaReceiver;

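	/**
	 * Create a template wrapping a {@link KafkaReceiver} built from the given options.
	 * @param receiverOptions fully configured receiver options, including the topic
	 * subscription and deserializers; must not be null
	 */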
	public ReactiveKafkaConsumerTemplate(ReceiverOptions<K, V> receiverOptions) {
		Assert.notNull(receiverOptions, "Receiver options can not be null");
		this.kafkaReceiver = KafkaReceiver.create(receiverOptions);
	}

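	/**
	 * Receive records with manual acknowledgment. Each emitted {@link ReceiverRecord}
	 * exposes a receiver offset that the application acknowledges (or commits) once
	 * the record has been processed.
	 * @return a {@link Flux} of records supporting manual acknowledgment
	 */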
	public Flux<ReceiverRecord<K, V>> receive() {
		return this.kafkaReceiver.receive();
	}

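	/**
	 * Receive records with automatic acknowledgment. The underlying receiver emits
	 * batches whose offsets are acknowledged automatically when each batch's Flux
	 * terminates; the batches are flattened here into a single {@link Flux} of records.
	 * @return a {@link Flux} of automatically acknowledged records
	 */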
	public Flux<ConsumerRecord<K, V>> receiveAutoAck() {
		return this.kafkaReceiver.receiveAutoAck().flatMap(Function.identity());
	}

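	/**
	 * Receive records with at-most-once delivery. Offsets are committed synchronously
	 * before each record is dispatched, so records are never redelivered after a
	 * failure but may be lost if processing fails.
	 * @return a {@link Flux} of records with at-most-once semantics
	 */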
	public Flux<ConsumerRecord<K, V>> receiveAtMostOnce() {
		return this.kafkaReceiver.receiveAtmostOnce();
	}

	/**
	 * Returns a {@link Flux} of consumer record batches that may be used for exactly once
	 * delivery semantics. A new transaction is started for each inner Flux and it is the
	 * responsibility of the consuming application to commit or abort the transaction
	 * using {@link TransactionManager#commit()} or {@link TransactionManager#abort()}
	 * after processing the Flux. The next batch of consumer records will be delivered only
	 * after the previous flux terminates. Offsets of records dispatched on each inner Flux
	 * are committed using the provided transactionManager within the transaction
	 * started for that Flux.
	 * <p> Example usage:
	 * <pre>
	 * {@code
	 * KafkaSender<Integer, Person> sender = sender(senderOptions());
	 * ReceiverOptions<Integer, Person> receiverOptions = receiverOptions(Collections.singleton(sourceTopic));
	 * KafkaReceiver<Integer, Person> receiver = KafkaReceiver.create(receiverOptions);
	 * receiver.receiveExactlyOnce(sender.transactionManager())
	 * 	 .concatMap(f -> sendAndCommit(f))
	 *	 .onErrorResume(e -> sender.transactionManager().abort().then(Mono.error(e)))
	 *	 .doOnCancel(() -> close());
	 *
	 * Flux<SenderResult<Integer>> sendAndCommit(Flux<ConsumerRecord<Integer, Person>> flux) {
	 * 	return sender.send(flux.map(r -> SenderRecord.create(transform(r.value()), r.key())))
	 *			.concatWith(sender.transactionManager().commit());
	 * }
	 * }
	 * </pre>
	 * @param transactionManager Transaction manager used to begin new transaction for each
	 * inner Flux and commit offsets within that transaction
	 * @return Flux of consumer record batches processed within a transaction
	 */
	public Flux<Flux<ConsumerRecord<K, V>>> receiveExactlyOnce(TransactionManager transactionManager) {
		return this.kafkaReceiver.receiveExactlyOnce(transactionManager);
	}

	/**
	 * Invoke the given function on the underlying Kafka {@link Consumer}. The function
	 * is scheduled when the returned {@link Mono} is subscribed to, and runs on the
	 * thread used for other consumer operations so that the consumer is never accessed
	 * concurrently by multiple threads.
	 * @param function the function to apply to the consumer
	 * @param <T> the function's result type
	 * @return a {@link Mono} emitting the result of the function
	 */
	public <T> Mono<T> doOnConsumer(Function<Consumer<K, V>, ? extends T> function) {
		return this.kafkaReceiver.doOnConsumer(function);
	}

	/**
	 * Return the partitions currently assigned to this consumer.
	 */
	public Flux<TopicPartition> assignment() {
		Mono<Set<TopicPartition>> partitions = doOnConsumer(Consumer::assignment);
		return partitions.flatMapIterable(Function.identity());
	}

	/**
	 * Return the topics this consumer is currently subscribed to.
	 */
	public Flux<String> subscription() {
		Mono<Set<String>> subscriptions = doOnConsumer(Consumer::subscription);
		return subscriptions.flatMapIterable(Function.identity());
	}

	/**
	 * Seek to the given offset for the given partition.
	 */
	public Mono<Void> seek(TopicPartition partition, long offset) {
		return doOnConsumer(consumer -> {
			consumer.seek(partition, offset);
			return null;
		});
	}

	/**
	 * Seek to the first offset for each of the given partitions.
	 */
	public Mono<Void> seekToBeginning(TopicPartition... partitions) {
		return doOnConsumer(consumer -> {
			consumer.seekToBeginning(Arrays.asList(partitions));
			return null;
		});
	}

	/**
	 * Seek to the last offset for each of the given partitions.
	 */
	public Mono<Void> seekToEnd(TopicPartition... partitions) {
		return doOnConsumer(consumer -> {
			consumer.seekToEnd(Arrays.asList(partitions));
			return null;
		});
	}

	/**
	 * Return the offset of the next record that will be fetched from the given partition.
	 */
	public Mono<Long> position(TopicPartition partition) {
		return doOnConsumer(consumer -> consumer.position(partition));
	}

	/**
	 * Return the last committed offsets for the given partitions.
	 */
	public Mono<Map<TopicPartition, OffsetAndMetadata>> committed(Set<TopicPartition> partitions) {
		return doOnConsumer(consumer -> consumer.committed(partitions));
	}

	/**
	 * Return partition metadata for the given topic, as seen by this consumer.
	 */
	public Flux<PartitionInfo> partitionsFromConsumerFor(String topic) {
		Mono<List<PartitionInfo>> partitions = doOnConsumer(c -> c.partitionsFor(topic));
		return partitions.flatMapIterable(Function.identity());
	}

	/**
	 * Return the partitions whose consumption was suspended by {@link #pause}.
	 */
	public Flux<TopicPartition> paused() {
		Mono<Set<TopicPartition>> paused = doOnConsumer(Consumer::paused);
		return paused.flatMapIterable(Function.identity());
	}

	/**
	 * Suspend fetching from the given partitions.
	 */
	public Mono<Void> pause(TopicPartition... partitions) {
		return doOnConsumer(c -> {
			c.pause(Arrays.asList(partitions));
			return null;
		});
	}

	/**
	 * Resume fetching from the given partitions.
	 */
	public Mono<Void> resume(TopicPartition... partitions) {
		return doOnConsumer(c -> {
			c.resume(Arrays.asList(partitions));
			return null;
		});
	}

	/**
	 * Return the consumer's metrics as (name, metric) tuples.
	 */
	public Flux<Tuple2<MetricName, ? extends Metric>> metricsFromConsumer() {
		return doOnConsumer(Consumer::metrics)
				.flatMapIterable(Map::entrySet)
				.map(m -> Tuples.of(m.getKey(), m.getValue()));
	}

	/**
	 * Return metadata about all topics visible to the consumer, as (topic, partitions) tuples.
	 */
	public Flux<Tuple2<String, List<PartitionInfo>>> listTopics() {
		return doOnConsumer(Consumer::listTopics)
				.flatMapIterable(Map::entrySet)
				.map(topicAndPartition -> Tuples.of(topicAndPartition.getKey(), topicAndPartition.getValue()));
	}

	/**
	 * Look up, for each given partition, the offset of the first record whose timestamp
	 * is greater than or equal to the given timestamp.
	 */
	public Flux<Tuple2<TopicPartition, OffsetAndTimestamp>> offsetsForTimes(
			Map<TopicPartition, Long> timestampsToSearch) {
		return doOnConsumer(c -> c.offsetsForTimes(timestampsToSearch))
				.flatMapIterable(Map::entrySet)
				.map(partitionAndOffset -> Tuples.of(partitionAndOffset.getKey(), partitionAndOffset.getValue()));
	}

	/**
	 * Return the first offset for each of the given partitions.
	 */
	public Flux<Tuple2<TopicPartition, Long>> beginningOffsets(TopicPartition... partitions) {
		return doOnConsumer(c -> c.beginningOffsets(Arrays.asList(partitions)))
				.flatMapIterable(Map::entrySet)
				.map(partitionsOffsets -> Tuples.of(partitionsOffsets.getKey(), partitionsOffsets.getValue()));
	}

	/**
	 * Return the end offset (the offset of the last record plus one) for each of the
	 * given partitions.
	 */
	public Flux<Tuple2<TopicPartition, Long>> endOffsets(TopicPartition... partitions) {
		return doOnConsumer(c -> c.endOffsets(Arrays.asList(partitions)))
				.flatMapIterable(Map::entrySet)
				.map(partitionsOffsets -> Tuples.of(partitionsOffsets.getKey(), partitionsOffsets.getValue()));
	}

}



