/*
 * Copyright (c) 2016-2023 VMware Inc. or its affiliates, All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package reactor.kafka.receiver.internals;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.RetriableCommitFailedException;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.RebalanceInProgressException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import reactor.core.scheduler.Scheduler;
import reactor.core.scheduler.Schedulers;
import reactor.kafka.receiver.KafkaReceiver;
import reactor.kafka.receiver.ReceiverOptions;
import reactor.kafka.receiver.ReceiverRecord;
import reactor.kafka.receiver.observation.KafkaReceiverObservation;
import reactor.kafka.receiver.observation.KafkaRecordReceiverContext;
import reactor.kafka.sender.TransactionManager;

import java.util.Optional;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.BiFunction;
import java.util.function.Function;
import java.util.function.Predicate;

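/**
 * Default {@link KafkaReceiver} implementation. Each {@code receive*()} method creates a
 * fresh {@link ConsumerHandler} (and hence a new Kafka consumer) per subscription and
 * publishes records on a dedicated single-threaded scheduler, using the {@link AckMode}
 * that matches the delivery semantics of the method.
 *
 * <p>A minimal usage sketch via the public factory (the properties map, topic name and
 * the String key/value types below are illustrative placeholders, not part of this class):
 * <pre>{@code
 * Map<String, Object> props = ...; // bootstrap.servers, group.id, deserializers
 * ReceiverOptions<String, String> options =
 *     ReceiverOptions.<String, String>create(props)
 *         .subscription(Collections.singleton("my-topic"));
 * KafkaReceiver.create(options)
 *     .receive()
 *     .doOnNext(r -> r.receiverOffset().acknowledge())
 *     .subscribe();
 * }</pre>
 */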
public class DefaultKafkaReceiver<K, V> implements KafkaReceiver<K, V> {

    private static final Logger log = LoggerFactory.getLogger(DefaultKafkaReceiver.class);

    private final ConsumerFactory consumerFactory;

    private final ReceiverOptions<K, V> receiverOptions;

    private final String receiverId;

    Predicate<Throwable> isRetriableException = t -> RetriableCommitFailedException.class.isInstance(t)
        || RebalanceInProgressException.class.isInstance(t);

    final AtomicReference<ConsumerHandler<K, V>> consumerHandlerRef = new AtomicReference<>();

    public DefaultKafkaReceiver(ConsumerFactory consumerFactory, ReceiverOptions<K, V> receiverOptions) {
        this.consumerFactory = consumerFactory;
        this.receiverOptions = receiverOptions;
        receiverId =
            Optional.ofNullable(receiverOptions.clientId())
                .filter(clientId -> !clientId.isEmpty())
                .orElse("reactor-kafka-receiver-" + System.identityHashCode(this));
    }

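    /**
     * Manual-ack receive: emits one {@link ReceiverRecord} per Kafka record, pairing each
     * record with a committable offset so the caller controls acknowledgement. A null
     * {@code prefetch} falls back to a publishOn queue size of 1.
     */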
    @Override
    public Flux<ReceiverRecord<K, V>> receive(Integer prefetch) {
        return withHandler(AckMode.MANUAL_ACK, (scheduler, handler) -> {
            int prefetchCalculated = preparePublishOnQueueSize(prefetch);
            return handler
                .receive()
                .publishOn(scheduler, prefetchCalculated)
                .flatMapIterable(it -> it, prefetchCalculated)
                .doOnNext(this::observerRecord)
                .map(record -> new ReceiverRecord<>(
                    record,
                    handler.toCommittableOffset(record)
                ));
        });
    }

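    /**
     * Manual-ack batch receive: every non-empty poll is emitted as an inner {@link Flux}
     * of {@link ReceiverRecord}s, preserving poll-batch boundaries.
     */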
    @Override
    public Flux<Flux<ReceiverRecord<K, V>>> receiveBatch(Integer prefetch) {
        return withHandler(AckMode.MANUAL_ACK, (scheduler, handler) -> {
            int prefetchCalculated = preparePublishOnQueueSize(prefetch);
            return handler
                .receive()
                .filter(it -> !it.isEmpty())
                .publishOn(scheduler, prefetchCalculated)
                .map(records -> Flux.fromIterable(records)
                    .map(record -> new ReceiverRecord<>(
                        record,
                        handler.toCommittableOffset(record)
                    ))
                );
        });
    }

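    /**
     * Auto-ack receive: emits each non-empty poll as an inner {@link Flux} and
     * acknowledges every record of the batch once that inner Flux terminates.
     */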
    @Override
    public Flux<Flux<ConsumerRecord<K, V>>> receiveAutoAck(Integer prefetch) {
        return withHandler(AckMode.AUTO_ACK, (scheduler, handler) -> handler
            .receive()
            .filter(it -> !it.isEmpty())
            .publishOn(scheduler, preparePublishOnQueueSize(prefetch))
            .map(consumerRecords -> Flux.fromIterable(consumerRecords)
                .doOnNext(this::observerRecord)
                .doAfterTerminate(() -> {
                    for (ConsumerRecord<K, V> r : consumerRecords) {
                        handler.acknowledge(r);
                    }
                })));
    }

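    /**
     * At-most-once receive: commits each record's offset before emitting the record, so
     * a failure after the commit can drop the record but never redeliver it.
     */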
    @Override
    public Flux<ConsumerRecord<K, V>> receiveAtmostOnce(Integer prefetch) {
        return withHandler(AckMode.ATMOST_ONCE, (scheduler, handler) -> handler
            .receive()
            .concatMap(records -> Flux
                .fromIterable(records)
                .doOnNext(this::observerRecord)
                .concatMap(r -> handler.commit(r).thenReturn(r))
                .publishOn(scheduler, 1), preparePublishOnQueueSize(prefetch)));
    }

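    /**
     * Exactly-once receive: wraps each poll batch in a transaction begun on the given
     * {@link TransactionManager}, sends the batch offsets to that transaction, and
     * publishes the records on the transaction manager's scheduler.
     */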
    @Override
    public Flux<Flux<ConsumerRecord<K, V>>> receiveExactlyOnce(TransactionManager transactionManager, Integer prefetch) {
        return withHandler(AckMode.EXACTLY_ONCE, (scheduler, handler) -> {
            Flux<Flux<ConsumerRecord<K, V>>> resultFlux =
                handler
                    .receive()
                    .filter(it -> !it.isEmpty())
                    .map(consumerRecords -> {
                        CommittableBatch offsetBatch = new CommittableBatch();
                        for (ConsumerRecord<K, V> r : consumerRecords) {
                            offsetBatch.updateOffset(new TopicPartition(r.topic(), r.partition()), r.offset());
                        }

                        return transactionManager.begin()
                            .thenMany(Flux.defer(() -> {
                                handler.awaitingTransaction.getAndSet(true);
                                return Flux.fromIterable(consumerRecords);
                            }))
                            .concatWith(transactionManager
                                .sendOffsets(offsetBatch
                                        .getAndClearOffsets()
                                        .offsets(),
                                    handler.consumer.groupMetadata()))
                            .doOnNext(this::observerRecord)
                            .doAfterTerminate(() -> handler.awaitingTransaction.set(false));
                    });
            return resultFlux.publishOn(transactionManager.scheduler(), preparePublishOnQueueSize(prefetch));
        });
    }

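    /**
     * Records delivery of a single record as a Micrometer observation built from the
     * configured convention, this receiver's id and the bootstrap servers.
     */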
    private <R extends ConsumerRecord<K, V>> void observerRecord(R record) {
        KafkaReceiverObservation.RECEIVER_OBSERVATION.observation(receiverOptions.observationConvention(),
                KafkaReceiverObservation.DefaultKafkaReceiverObservationConvention.INSTANCE,
                () -> new KafkaRecordReceiverContext(record,
                    receiverId,
                    receiverOptions.bootstrapServers()),
                receiverOptions.observationRegistry())
            .observe(() -> log.trace("[{}] received: {}", receiverId, record));
    }

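    /**
     * Runs the given function on the underlying Kafka {@link Consumer}. Fails if no
     * {@code receive*()} method is currently subscribed, since the consumer only exists
     * while a handler is active.
     */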
    @Override
    public <T> Mono<T> doOnConsumer(Function<Consumer<K, V>, ? extends T> function) {
        ConsumerHandler<K, V> consumerHandler = consumerHandlerRef.get();
        if (consumerHandler == null) {
            // TODO deprecate this method, expose ConsumerHandler
            return Mono.error(new IllegalStateException("You must call one of receive*() methods before using doOnConsumer"));
        }
        return consumerHandler.doOnConsumer(function);
    }

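    /**
     * Shared receive pipeline: creates a {@link ConsumerHandler} for this subscription,
     * applies the supplied function on a dedicated single-threaded scheduler, and on
     * termination disposes the scheduler, closes the handler and clears the handler
     * reference.
     */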
    private <T> Flux<T> withHandler(AckMode ackMode, BiFunction<Scheduler, ConsumerHandler<K, V>, Flux<T>> function) {
        return Flux.usingWhen(
            Mono.fromCallable(() -> {
                ConsumerHandler<K, V> consumerHandler = new ConsumerHandler<>(
                    receiverOptions,
                    consumerFactory.createConsumer(receiverOptions),
                    // Always use the currently set value
                    e -> isRetriableException.test(e),
                    ackMode
                );
                consumerHandlerRef.set(consumerHandler);
                return consumerHandler;
            }),
            handler -> Flux.using(
                () -> Schedulers.single(receiverOptions.schedulerSupplier().get()),
                scheduler -> function.apply(scheduler, handler),
                Scheduler::dispose
            ),
            handler -> handler.close().doFinally(__ -> consumerHandlerRef.compareAndSet(handler, null))
        );
    }

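    /**
     * publishOn queue size: the caller-supplied prefetch, or 1 when none is given.
     */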
    private int preparePublishOnQueueSize(Integer prefetch) {
        return prefetch != null ? prefetch : 1;
    }

}