/*
 * Copyright (c) 2020-2023 VMware Inc. or its affiliates, All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package reactor.kafka.receiver.internals;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.common.TopicPartition;
import reactor.core.Disposable;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import reactor.core.publisher.Sinks;
import reactor.core.scheduler.Scheduler;
import reactor.kafka.receiver.KafkaReceiver;
import reactor.kafka.receiver.ReceiverOffset;
import reactor.kafka.receiver.ReceiverOptions;
import reactor.kafka.receiver.ReceiverOptions.ConsumerListener;

import java.lang.reflect.InvocationHandler;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Proxy;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Function;
import java.util.function.Predicate;

/**
 * A helper class that holds the state of a current receive "session".
 * To be exposed as a public class in the next major version (subject to API review).
 */
class ConsumerHandler<K, V> {

    /** Note: Methods added to this set should also be included in javadoc for {@link KafkaReceiver#doOnConsumer(Function)} */
    private static final Set<String> DELEGATE_METHODS = new HashSet<>(Arrays.asList(
        "assignment",
        "subscription",
        "seek",
        "seekToBeginning",
        "seekToEnd",
        "position",
        "committed",
        "metrics",
        "partitionsFor",
        "listTopics",
        "paused",
        "pause",
        "resume",
        "offsetsForTimes",
        "beginningOffsets",
        "endOffsets",
        "currentLag"
    ));

    private static final AtomicInteger COUNTER = new AtomicInteger();

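    /**
     * Shared with {@link ConsumerEventLoop}, which appears to hold back
     * emission of further records while a transaction started by the
     * exactly-once receive path is still in flight.
     */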
    final AtomicBoolean awaitingTransaction = new AtomicBoolean();

    private final AtmostOnceOffsets atmostOnceOffsets = new AtmostOnceOffsets();

    private final ReceiverOptions<K, V> receiverOptions;

    final Consumer<K, V> consumer;

    private final Scheduler eventScheduler;

    private final ConsumerEventLoop<K, V> consumerEventLoop;

    private final Sinks.Many<ConsumerRecords<K, V>> sink =
        Sinks.many().unicast().onBackpressureBuffer();

    private final ConsumerListener consumerListener;

    private final String consumerId;

    private Consumer<K, V> consumerProxy;

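    /**
     * Registers the consumer with the configured {@link ConsumerListener}
     * (if any) under a unique id, creates a dedicated event scheduler for
     * the consumer group, and wires up the {@link ConsumerEventLoop} that
     * feeds the sink.
     */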
    ConsumerHandler(
        ReceiverOptions<K, V> receiverOptions,
        Consumer<K, V> consumer,
        Predicate<Throwable> isRetriableException,
        AckMode ackMode
    ) {
        this.receiverOptions = receiverOptions;
        this.consumer = consumer;
        consumerListener = receiverOptions.consumerListener();
        consumerId = "reactor-kafka-" + receiverOptions.groupId() + "-" + COUNTER.incrementAndGet();
        if (consumerListener != null) {
            consumerListener.consumerAdded(consumerId, consumer);
        }

        eventScheduler = KafkaSchedulers.newEvent(receiverOptions.groupId());

        consumerEventLoop = new ConsumerEventLoop<>(
            ackMode,
            atmostOnceOffsets,
            receiverOptions,
            eventScheduler,
            consumer,
            isRetriableException,
            sink,
            awaitingTransaction
        );
        eventScheduler.init();
    }

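    /**
     * Exposes the sink as a {@link Flux}; downstream demand is forwarded to
     * the event loop, which drives polling.
     *
     * <p>Illustrative sketch only (applications normally go through
     * {@link KafkaReceiver#receive()} rather than this internal class;
     * {@code process} is a hypothetical callback):
     * <pre>{@code
     * handler.receive()
     *     .flatMapIterable(records -> records)
     *     .subscribe(record -> process(record));
     * }</pre>
     */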
    public Flux<ConsumerRecords<K, V>> receive() {
        return sink.asFlux().doOnRequest(consumerEventLoop::onRequest);
    }

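    /**
     * Deregisters the consumer from the {@link ConsumerListener} (if any),
     * stops the event loop, and disposes the event scheduler once the stop
     * has completed.
     */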
    public Mono<Void> close() {
        if (consumerListener != null) {
            consumerListener.consumerRemoved(consumerId, consumer);
        }
        return consumerEventLoop.stop().doFinally(__ -> eventScheduler.dispose());
    }

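    /**
     * Runs {@code function} on the event-loop thread against a restricted
     * proxy, keeping all access to the (non-thread-safe) consumer
     * single-threaded; only the methods in {@link #DELEGATE_METHODS} are
     * permitted.
     *
     * <p>For example (illustrative):
     * <pre>{@code
     * Mono<Set<TopicPartition>> assigned = handler.doOnConsumer(Consumer::assignment);
     * }</pre>
     */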
    public <T> Mono<T> doOnConsumer(Function<Consumer<K, V>, ? extends T> function) {
        return Mono.create(monoSink -> {
            Disposable disposable = eventScheduler.schedule(() -> {
                try {
                    T result = function.apply(consumerProxy());
                    monoSink.success(result);
                } catch (Exception e) {
                    monoSink.error(e);
                }
            });
            monoSink.onCancel(disposable);
        });
    }

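    /**
     * Commit-ahead for at-most-once delivery: the commit position is pushed
     * to {@code offset + atmostOnceCommitAheadSize}. If the record has
     * reached the previously committed position, the commit is returned so
     * the caller can complete it before dispatching the record; otherwise,
     * while at least half the commit-ahead window remains, the window is
     * refreshed with a fire-and-forget commit.
     */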
    public Mono<Void> commit(ConsumerRecord<K, V> record) {
        long offset = record.offset();
        TopicPartition partition = new TopicPartition(record.topic(), record.partition());
        long committedOffset = atmostOnceOffsets.committedOffset(partition);
        atmostOnceOffsets.onDispatch(partition, offset);
        long commitAheadSize = receiverOptions.atmostOnceCommitAheadSize();
        ReceiverOffset committable = new CommittableOffset<>(
            partition,
            offset + commitAheadSize,
            consumerEventLoop.commitEvent,
            receiverOptions.commitBatchSize()
        );
        if (offset >= committedOffset) {
            return committable.commit();
        } else if (committedOffset - offset >= commitAheadSize / 2) {
            committable.commit().subscribe();
        }
        return Mono.empty();
    }

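    /**
     * Marks the record as processed, making its offset eligible for the
     * next batched commit.
     */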
    public void acknowledge(ConsumerRecord<K, V> record) {
        toCommittableOffset(record).acknowledge();
    }

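    /**
     * Wraps the record's position as a {@link ReceiverOffset} bound to this
     * handler's commit event and configured commit batch size.
     */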
    public CommittableOffset<K, V> toCommittableOffset(ConsumerRecord<K, V> record) {
        return new CommittableOffset<>(
            new TopicPartition(record.topic(), record.partition()),
            record.offset(),
            consumerEventLoop.commitEvent,
            receiverOptions.commitBatchSize()
        );
    }

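    /**
     * Lazily creates a dynamic proxy exposing only {@link #DELEGATE_METHODS}.
     * Calls to {@code pause} and {@code resume} are additionally reported to
     * the event loop, presumably so it can tell user-initiated pauses apart
     * from its own back-pressure pauses.
     */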
    @SuppressWarnings("unchecked")
    private Consumer<K, V> consumerProxy() {
        if (consumerProxy != null) {
            return consumerProxy;
        }

        Class<?>[] interfaces = new Class<?>[]{Consumer.class};
        InvocationHandler handler = (proxy, method, args) -> {
            if (DELEGATE_METHODS.contains(method.getName())) {
                try {
                    if (method.getName().equals("pause")) {
                        this.consumerEventLoop.paused((Collection<TopicPartition>) args[0]);
                    } else if (method.getName().equals("resume")) {
                        this.consumerEventLoop.resumed((Collection<TopicPartition>) args[0]);
                    }
                    return method.invoke(consumer, args);
                } catch (InvocationTargetException e) {
                    throw e.getCause();
                }
            } else {
                throw new UnsupportedOperationException("Method is not supported: " + method);
            }
        };
        consumerProxy = (Consumer<K, V>) Proxy.newProxyInstance(Consumer.class.getClassLoader(), interfaces, handler);
        return consumerProxy;
    }

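    /**
     * A {@link ReceiverOffset} that funnels all commits through the event
     * loop's shared commit batch: {@link #acknowledge()} stages the offset,
     * and {@link #commit()} additionally waits for the batch containing it
     * to be committed.
     */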
    private static class CommittableOffset<K, V> implements ReceiverOffset {

        private final TopicPartition topicPartition;

        private final long commitOffset;

        private final ConsumerEventLoop<K, V>.CommitEvent commitEvent;

        private final int commitBatchSize;

        private final AtomicBoolean acknowledged = new AtomicBoolean(false);

        public CommittableOffset(
            TopicPartition topicPartition,
            long nextOffset,
            ConsumerEventLoop<K, V>.CommitEvent commitEvent,
            int commitBatchSize
        ) {
            this.topicPartition = topicPartition;
            this.commitOffset = nextOffset;
            this.commitEvent = commitEvent;
            this.commitBatchSize = commitBatchSize;
        }

        @Override
        public Mono<Void> commit() {
            if (maybeUpdateOffset() > 0)
                return scheduleCommit();
            else
                return Mono.empty();
        }

        @Override
        public void acknowledge() {
            long uncommittedCount = maybeUpdateOffset();
            if (commitBatchSize > 0 && uncommittedCount >= commitBatchSize)
                commitEvent.scheduleIfRequired();
        }

        @Override
        public TopicPartition topicPartition() {
            return topicPartition;
        }

        @Override
        public long offset() {
            return commitOffset;
        }

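        /**
         * Stages this offset in the shared commit batch at most once and
         * returns the number of acknowledged-but-uncommitted entries.
         */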
        private int maybeUpdateOffset() {
            if (acknowledged.compareAndSet(false, true))
                return commitEvent.commitBatch.updateOffset(topicPartition, commitOffset);
            else
                return commitEvent.commitBatch.batchSize();
        }

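        /**
         * Registers an emitter with the commit batch so the returned
         * {@link Mono} completes only when this offset has actually been
         * committed, then schedules a commit if one is not already pending.
         */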
        private Mono<Void> scheduleCommit() {
            return Mono.create(emitter -> {
                commitEvent.commitBatch.addCallbackEmitter(emitter);
                commitEvent.scheduleIfRequired();
            });
        }

        @Override
        public String toString() {
            return topicPartition + "@" + commitOffset;
        }
    }
}