software.amazon.kinesis.retrieval.polling.PrefetchRecordsPublisher

The Amazon Kinesis Client Library for Java enables Java developers to easily consume and process data from Amazon Kinesis.
/*
 * Copyright 2019 Amazon.com, Inc. or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package software.amazon.kinesis.retrieval.polling;

import java.time.Duration;
import java.time.Instant;
import java.util.List;
import java.util.UUID;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.stream.Collectors;

import lombok.AccessLevel;
import lombok.Getter;
import lombok.Setter;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.Validate;
import org.reactivestreams.Subscriber;
import org.reactivestreams.Subscription;

import com.google.common.annotations.VisibleForTesting;

import lombok.Data;
import lombok.NonNull;
import lombok.experimental.Accessors;
import lombok.extern.slf4j.Slf4j;
import software.amazon.awssdk.core.exception.SdkException;
import software.amazon.awssdk.services.cloudwatch.model.StandardUnit;
import software.amazon.awssdk.services.kinesis.model.ExpiredIteratorException;
import software.amazon.awssdk.services.kinesis.model.GetRecordsResponse;
import software.amazon.kinesis.annotations.KinesisClientInternalApi;
import software.amazon.kinesis.common.InitialPositionInStreamExtended;
import software.amazon.kinesis.lifecycle.events.ProcessRecordsInput;
import software.amazon.kinesis.metrics.MetricsFactory;
import software.amazon.kinesis.metrics.MetricsLevel;
import software.amazon.kinesis.metrics.MetricsScope;
import software.amazon.kinesis.metrics.MetricsUtil;
import software.amazon.kinesis.metrics.ThreadSafeMetricsDelegatingFactory;
import software.amazon.kinesis.retrieval.BatchUniqueIdentifier;
import software.amazon.kinesis.retrieval.GetRecordsRetrievalStrategy;
import software.amazon.kinesis.retrieval.KinesisClientRecord;
import software.amazon.kinesis.retrieval.RecordsDeliveryAck;
import software.amazon.kinesis.retrieval.RecordsPublisher;
import software.amazon.kinesis.retrieval.RecordsRetrieved;
import software.amazon.kinesis.retrieval.RetryableRetrievalException;
import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber;

import static software.amazon.kinesis.common.DiagnosticUtils.takeDelayedDeliveryActionIfRequired;

/**
 * This is the prefetch caching class. When prefetching is enabled, it spins up a thread that fetches the
 * next set of records and stores them in the cache. The size of the cache is bounded by three settings:
 * maxPendingProcessRecordsInput, the maximum number of GetRecordsResult objects the cache can hold; maxByteSize,
 * the maximum byte size of the records stored in the cache; and maxRecordsCount, the maximum number of records
 * that may be present in the cache across all GetRecordsResult objects. If no data is available in the cache,
 * the call from the record processor blocks until records are retrieved from Kinesis.
 *
 * There are three threads, namely publisher, demand-notifier and ack-notifier, which contend to drain the events
 * to the Subscriber (the ShardConsumer in KCL).
 */
@Slf4j
@KinesisClientInternalApi
public class PrefetchRecordsPublisher implements RecordsPublisher {
    private static final String EXPIRED_ITERATOR_METRIC = "ExpiredIterator";
    private int maxPendingProcessRecordsInput;
    private int maxByteSize;
    private int maxRecordsCount;
    private final int maxRecordsPerCall;
    private final GetRecordsRetrievalStrategy getRecordsRetrievalStrategy;
    private final ExecutorService executorService;
    private final MetricsFactory metricsFactory;
    private final long idleMillisBetweenCalls;
    private Instant lastSuccessfulCall;
    private final DefaultGetRecordsCacheDaemon defaultGetRecordsCacheDaemon;
    private boolean started = false;
    private final String operation;
    private final String shardId;
    private Subscriber<? super RecordsRetrieved> subscriber;
    @VisibleForTesting @Getter
    private final PublisherSession publisherSession;
    private final ReentrantReadWriteLock resetLock = new ReentrantReadWriteLock();
    private boolean wasReset = false;

    private Instant lastEventDeliveryTime = Instant.EPOCH;

    @Data
    @Accessors(fluent = true)
    static final class PublisherSession {
        private final AtomicLong requestedResponses = new AtomicLong(0);
        @VisibleForTesting @Getter
        private final LinkedBlockingQueue<PrefetchRecordsRetrieved> prefetchRecordsQueue;
        private final PrefetchCounters prefetchCounters;
        private final KinesisDataFetcher dataFetcher;
        private InitialPositionInStreamExtended initialPositionInStreamExtended;
        private String highestSequenceNumber;

        // Initialize the session on publisher start.
        void init(ExtendedSequenceNumber extendedSequenceNumber,
                InitialPositionInStreamExtended initialPositionInStreamExtended) {
            this.initialPositionInStreamExtended = initialPositionInStreamExtended;
            this.highestSequenceNumber = extendedSequenceNumber.sequenceNumber();
            this.dataFetcher.initialize(extendedSequenceNumber, initialPositionInStreamExtended);
        }

        // Reset the session when publisher restarts.
        void reset(PrefetchRecordsRetrieved prefetchRecordsRetrieved) {
            // Reset the demand from ShardConsumer, to prevent this publisher from delivering events to stale RX-Java
            // Subscriber. Publishing will be unblocked when the demand is communicated by the new Rx-Java subscriber.
            requestedResponses.set(0);
            // Clear the queue, so that the publisher repopulates the queue based on sequence number from subscriber.
            prefetchRecordsQueue.clear();
            prefetchCounters.reset();
            highestSequenceNumber = prefetchRecordsRetrieved.lastBatchSequenceNumber();
            dataFetcher.resetIterator(prefetchRecordsRetrieved.shardIterator(), highestSequenceNumber,
                    initialPositionInStreamExtended);
        }

        // Handle records delivery ack and execute nextEventDispatchAction.
        // This method is not thread-safe and needs to be called after acquiring a monitor.
        void handleRecordsDeliveryAck(RecordsDeliveryAck recordsDeliveryAck, String shardId, Runnable nextEventDispatchAction) {
            final PrefetchRecordsRetrieved recordsToCheck = peekNextRecord();
            // Verify if the ack matches the head of the queue and evict it.
            if (recordsToCheck != null && recordsToCheck.batchUniqueIdentifier().equals(recordsDeliveryAck.batchUniqueIdentifier())) {
                evictPublishedRecordAndUpdateDemand(shardId);
                nextEventDispatchAction.run();
            } else {
                // Log and ignore any other ack received. As long as an ack is received for the head of the queue
                // we are good. Any stale or future ack can be ignored, though the latter should not be possible.
                final BatchUniqueIdentifier peekedBatchUniqueIdentifier =
                        recordsToCheck == null ? null : recordsToCheck.batchUniqueIdentifier();
                log.info("{} :  Received a stale notification with id {} instead of expected id {} at {}. Will ignore.",
                        shardId, recordsDeliveryAck.batchUniqueIdentifier(), peekedBatchUniqueIdentifier, Instant.now());
            }
        }

        // Evict the published record from the prefetch queue.
        // This method is not thread-safe and needs to be called after acquiring a monitor.
        @VisibleForTesting
        RecordsRetrieved evictPublishedRecordAndUpdateDemand(String shardId) {
            final PrefetchRecordsRetrieved result = prefetchRecordsQueue.poll();
            if (result != null) {
                updateDemandTrackersOnPublish(result);
            } else {
                log.info(
                        "{}: No record batch found while evicting from the prefetch queue. This indicates the prefetch buffer"
                                + " was reset.", shardId);
            }
            return result;
        }

        boolean hasDemandToPublish() {
            return requestedResponses.get() > 0;
        }

        PrefetchRecordsRetrieved peekNextRecord() {
            return prefetchRecordsQueue.peek();
        }

        boolean offerRecords(PrefetchRecordsRetrieved recordsRetrieved, long idleMillisBetweenCalls) throws InterruptedException {
            return prefetchRecordsQueue.offer(recordsRetrieved, idleMillisBetweenCalls, TimeUnit.MILLISECONDS);
        }

        private void updateDemandTrackersOnPublish(PrefetchRecordsRetrieved result) {
            prefetchCounters.removed(result.processRecordsInput);
            requestedResponses.decrementAndGet();
        }

    }
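
    // Illustrative sketch of the ack protocol handled above (hypothetical subscriber-side code,
    // not part of this class): after processing a delivered batch, the subscriber acks it back so
    // that the head of prefetchRecordsQueue can be evicted and the next event dispatched. Assuming
    // RecordsDeliveryAck has batchUniqueIdentifier() as its single abstract method, a method
    // reference satisfies it:
    //
    //     RecordsRetrieved delivered = ...;                    // received via Subscriber#onNext
    //     publisher.notify(delivered::batchUniqueIdentifier);  // matches head -> evict + dispatch next
    //
    // An ack that does not match the head of the queue is logged and ignored.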

    /**
     * Constructor for the PrefetchRecordsPublisher. This cache prefetches records from Kinesis and stores them in a
     * LinkedBlockingQueue.
     *
     * @see PrefetchRecordsPublisher
     *
     * @param maxPendingProcessRecordsInput Max number of ProcessRecordsInput that can be held in the cache before
     *                                     blocking
     * @param maxByteSize Max byte size of the queue before blocking the next get records call
     * @param maxRecordsCount Max number of records in the queue across all ProcessRecordsInput objects
     * @param maxRecordsPerCall Max records to be returned per call
     * @param getRecordsRetrievalStrategy Retrieval strategy for the get records call
     * @param executorService Executor service for the cache
     * @param idleMillisBetweenCalls minimum time to allow between successive get records calls; the prefetch
     *                               thread sleeps for at most this long before the next call
     */
    public PrefetchRecordsPublisher(final int maxPendingProcessRecordsInput, final int maxByteSize, final int maxRecordsCount,
                                    final int maxRecordsPerCall,
                                    @NonNull final GetRecordsRetrievalStrategy getRecordsRetrievalStrategy,
                                    @NonNull final ExecutorService executorService,
                                    final long idleMillisBetweenCalls,
                                    @NonNull final MetricsFactory metricsFactory,
                                    @NonNull final String operation,
                                    @NonNull final String shardId) {
        this.getRecordsRetrievalStrategy = getRecordsRetrievalStrategy;
        this.maxRecordsPerCall = maxRecordsPerCall;
        this.maxPendingProcessRecordsInput = maxPendingProcessRecordsInput;
        this.maxByteSize = maxByteSize;
        this.maxRecordsCount = maxRecordsCount;
        this.publisherSession = new PublisherSession(new LinkedBlockingQueue<>(this.maxPendingProcessRecordsInput),
                new PrefetchCounters(), this.getRecordsRetrievalStrategy.getDataFetcher());
        this.executorService = executorService;
        this.metricsFactory = new ThreadSafeMetricsDelegatingFactory(metricsFactory);
        this.idleMillisBetweenCalls = idleMillisBetweenCalls;
        this.defaultGetRecordsCacheDaemon = new DefaultGetRecordsCacheDaemon();
        Validate.notEmpty(operation, "Operation cannot be empty");
        this.operation = operation;
        this.shardId = shardId;
    }
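
    // A minimal construction sketch (illustrative values and caller-supplied dependencies, not
    // defaults verified here); "strategy", "fetcherExecutor" and "metrics" are assumed to be
    // built elsewhere:
    //
    //     RecordsPublisher publisher = new PrefetchRecordsPublisher(
    //             3,                    // maxPendingProcessRecordsInput
    //             8 * 1024 * 1024,      // maxByteSize (8 MiB)
    //             30_000,               // maxRecordsCount
    //             10_000,               // maxRecordsPerCall
    //             strategy,             // GetRecordsRetrievalStrategy
    //             fetcherExecutor,      // ExecutorService that runs the prefetch daemon
    //             1_500L,               // idleMillisBetweenCalls
    //             metrics,              // MetricsFactory
    //             "ProcessTask",        // operation name used for the metrics scope
    //             "shardId-000000000000");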

    @Override
    public void start(ExtendedSequenceNumber extendedSequenceNumber, InitialPositionInStreamExtended initialPositionInStreamExtended) {
        if (executorService.isShutdown()) {
            throw new IllegalStateException("ExecutorService has been shutdown.");
        }

        publisherSession.init(extendedSequenceNumber, initialPositionInStreamExtended);

        if (!started) {
            log.info("{} : Starting prefetching thread.", shardId);
            executorService.execute(defaultGetRecordsCacheDaemon);
        }
        started = true;
    }

    private void throwOnIllegalState() {
        if (executorService.isShutdown()) {
            throw new IllegalStateException("Shutdown has been called on the cache, can't accept new requests.");
        }

        if (!started) {
            throw new IllegalStateException("Cache has not been initialized, make sure to call start.");
        }
    }

    private PrefetchRecordsRetrieved peekNextResult() {
        throwOnIllegalState();
        return publisherSession.peekNextRecord();
    }

    @Override
    public void shutdown() {
        defaultGetRecordsCacheDaemon.isShutdown = true;
        executorService.shutdownNow();
        started = false;
    }

    @Override
    public void restartFrom(RecordsRetrieved recordsRetrieved) {
        if (!(recordsRetrieved instanceof PrefetchRecordsRetrieved)) {
            throw new IllegalArgumentException(
                    "Provided RecordsRetrieved was not produced by the PrefetchRecordsPublisher");
        }
        resetLock.writeLock().lock();
        try {
            publisherSession.reset((PrefetchRecordsRetrieved)recordsRetrieved);
            wasReset = true;
        } finally {
            resetLock.writeLock().unlock();
        }
    }

    @Override
    public void subscribe(Subscriber<? super RecordsRetrieved> s) {
        subscriber = s;
        subscriber.onSubscribe(new Subscription() {
            @Override
            public void request(long n) {
                publisherSession.requestedResponses().addAndGet(n);
                drainQueueForRequests();
            }

            @Override
            public void cancel() {
                // When the subscription is cancelled, the demand is set to 0, to prevent further
                // records from being dispatched to the consumer/subscriber. The publisher session state will be
                // reset when restartFrom(*) is called by the consumer/subscriber.
                publisherSession.requestedResponses().set(0);
            }
        });
    }
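
    // Hypothetical minimal subscriber driving the demand model above (in KCL this role is played
    // by the ShardConsumer's subscriber machinery):
    //
    //     publisher.subscribe(new Subscriber<RecordsRetrieved>() {
    //         private Subscription subscription;
    //         public void onSubscribe(Subscription s) { subscription = s; s.request(1); }
    //         public void onNext(RecordsRetrieved r)  { /* process + ack, then */ subscription.request(1); }
    //         public void onError(Throwable t)        { /* trigger restartFrom(...) upstream */ }
    //         public void onComplete()                { }
    //     });
    //
    // Each request(n) adds demand and immediately attempts to drain the queue; cancel() only
    // zeroes the demand, leaving state reset to a later restartFrom(...).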

    @Override
    public synchronized void notify(RecordsDeliveryAck recordsDeliveryAck) {
        publisherSession.handleRecordsDeliveryAck(recordsDeliveryAck, shardId, () -> drainQueueForRequests());
        // Take action based on the time spent by the event in queue.
        takeDelayedDeliveryActionIfRequired(shardId, lastEventDeliveryTime, log);
    }

    // Note : Do not make this method synchronized, as notify() would then be unable to evict any entry from the queue.
    private void addArrivedRecordsInput(PrefetchRecordsRetrieved recordsRetrieved) throws InterruptedException {
        wasReset = false;
        while (!publisherSession.offerRecords(recordsRetrieved, idleMillisBetweenCalls)) {
            //
            // Unlocking the read lock, and then reacquiring it, gives any waiters on the write lock a
            // chance to run. If the write lock is acquired by restartFrom(...) then the read lock will block until
            // restartFrom(...) has completed. This ensures that if a reset has occurred we know to discard the
            // data we received and start a new fetch of data.
            //
            resetLock.readLock().unlock();
            resetLock.readLock().lock();
            if (wasReset) {
                throw new PositionResetException();
            }
        }
        publisherSession.prefetchCounters().added(recordsRetrieved.processRecordsInput);
    }
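
    // Timeline sketch of the lock hand-off above (illustrative):
    //   daemon:    readLock.lock() -> offerRecords(...) times out (queue full) -> readLock.unlock()
    //   consumer:  restartFrom(...) takes the write lock, resets the session, sets wasReset, unlocks
    //   daemon:    readLock.lock() succeeds -> sees wasReset -> throws PositionResetException
    // The daemon then discards the stale batch and fetches fresh data from the reset position.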

    /**
     * Method to drain the queue based on the demand and the events availability in the queue.
     */
    @VisibleForTesting
    synchronized void drainQueueForRequests() {
        final PrefetchRecordsRetrieved recordsToDeliver = peekNextResult();
        // If there is an event available to drain and if there is at least one demand,
        // then schedule it for delivery
        if (publisherSession.hasDemandToPublish() && canDispatchRecord(recordsToDeliver)) {
            subscriber.onNext(recordsToDeliver.prepareForPublish());
            recordsToDeliver.dispatched();
            lastEventDeliveryTime = Instant.now();
        }
    }

    // This method is thread-safe and informs the caller whether this record is eligible to be dispatched.
    // If this record was already dispatched earlier, this method returns false.
    private static boolean canDispatchRecord(PrefetchRecordsRetrieved recordsToDeliver) {
        return recordsToDeliver != null && !recordsToDeliver.isDispatched();
    }

    @Accessors(fluent = true)
    @Data
    static class PrefetchRecordsRetrieved implements RecordsRetrieved {

        final ProcessRecordsInput processRecordsInput;
        final String lastBatchSequenceNumber;
        final String shardIterator;
        final BatchUniqueIdentifier batchUniqueIdentifier;
        @Accessors() @Setter(AccessLevel.NONE) boolean dispatched = false;

        PrefetchRecordsRetrieved prepareForPublish() {
            return new PrefetchRecordsRetrieved(processRecordsInput.toBuilder().cacheExitTime(Instant.now()).build(),
                    lastBatchSequenceNumber, shardIterator, batchUniqueIdentifier);
        }

        @Override
        public BatchUniqueIdentifier batchUniqueIdentifier() {
            return batchUniqueIdentifier;
        }

        // Marks this record batch as dispatched for delivery.
        void dispatched() { dispatched = true; }

        /**
         * Generate batch unique identifier for PrefetchRecordsRetrieved, where flow will be empty.
         * @return BatchUniqueIdentifier
         */
        public static BatchUniqueIdentifier generateBatchUniqueIdentifier() {
            return new BatchUniqueIdentifier(UUID.randomUUID().toString(),
                    StringUtils.EMPTY);
        }

    }
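
    // Note on prepareForPublish(): the copy handed to the subscriber carries the same
    // batchUniqueIdentifier as the instance still sitting at the head of the prefetch queue,
    // so the ack sent back via notify(...) matches and evicts the queued original; only
    // cacheExitTime differs, stamped at publish time. Sketch (illustrative):
    //
    //     PrefetchRecordsRetrieved published = queued.prepareForPublish();
    //     assert published.batchUniqueIdentifier().equals(queued.batchUniqueIdentifier());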

    private String calculateHighestSequenceNumber(ProcessRecordsInput processRecordsInput) {
        String result = publisherSession.highestSequenceNumber();
        if (processRecordsInput.records() != null && !processRecordsInput.records().isEmpty()) {
            result = processRecordsInput.records().get(processRecordsInput.records().size() - 1).sequenceNumber();
        }
        return result;
    }

    private static class PositionResetException extends RuntimeException {

    }


    private class DefaultGetRecordsCacheDaemon implements Runnable {
        volatile boolean isShutdown = false;

        @Override
        public void run() {
            while (!isShutdown) {
                if (Thread.currentThread().isInterrupted()) {
                    log.warn("{} : Prefetch thread was interrupted.", shardId);
                    break;
                }

                resetLock.readLock().lock();
                try {
                    makeRetrievalAttempt();
                } catch (PositionResetException pre) {
                    log.debug("{} : Position was reset while attempting to add item to queue.", shardId);
                } finally {
                    resetLock.readLock().unlock();
                }
            }
            callShutdownOnStrategy();
        }

        private void makeRetrievalAttempt() {
            MetricsScope scope = MetricsUtil.createMetricsWithOperation(metricsFactory, operation);
            if (publisherSession.prefetchCounters().shouldGetNewRecords()) {
                try {
                    sleepBeforeNextCall();
                    GetRecordsResponse getRecordsResult = getRecordsRetrievalStrategy.getRecords(maxRecordsPerCall);
                    lastSuccessfulCall = Instant.now();

                    final List<KinesisClientRecord> records = getRecordsResult.records().stream()
                            .map(KinesisClientRecord::fromRecord).collect(Collectors.toList());
                    ProcessRecordsInput processRecordsInput = ProcessRecordsInput.builder()
                            .records(records)
                            .millisBehindLatest(getRecordsResult.millisBehindLatest())
                            .cacheEntryTime(lastSuccessfulCall)
                            .isAtShardEnd(getRecordsRetrievalStrategy.getDataFetcher().isShardEndReached())
                            .build();

                    PrefetchRecordsRetrieved recordsRetrieved = new PrefetchRecordsRetrieved(processRecordsInput,
                            calculateHighestSequenceNumber(processRecordsInput), getRecordsResult.nextShardIterator(),
                            PrefetchRecordsRetrieved.generateBatchUniqueIdentifier());
                    publisherSession.highestSequenceNumber(recordsRetrieved.lastBatchSequenceNumber);
                    addArrivedRecordsInput(recordsRetrieved);
                    drainQueueForRequests();
                } catch (PositionResetException pse) {
                    throw pse;
                } catch (RetryableRetrievalException rre) {
                    log.info("{} :  Timeout occurred while waiting for response from Kinesis.  Will retry the request.", shardId);
                } catch (InterruptedException e) {
                    log.info("{} :  Thread was interrupted, indicating shutdown was called on the cache.", shardId);
                } catch (ExpiredIteratorException e) {
                    log.info("{} :  records threw ExpiredIteratorException - restarting"
                            + " after greatest seqNum passed to customer", shardId, e);

                    scope.addData(EXPIRED_ITERATOR_METRIC, 1, StandardUnit.COUNT, MetricsLevel.SUMMARY);

                    publisherSession.dataFetcher().restartIterator();
                } catch (SdkException e) {
                    log.error("{} :  Exception thrown while fetching records from Kinesis", shardId, e);
                } catch (Throwable e) {
                    log.error("{} :  Unexpected exception was thrown. This could probably be an issue or a bug." +
                            " Please search for the exception/error online to check what is going on. If the " +
                            "issue persists or is a recurring problem, feel free to open an issue on, " +
                            "https://github.com/awslabs/amazon-kinesis-client.", shardId, e);
                } finally {
                    MetricsUtil.endScope(scope);
                }
            } else {
                //
                // The consumer isn't ready to receive new records, so allow the prefetch counters to pause
                // the retrieval loop until capacity frees up.
                //
                try {
                    publisherSession.prefetchCounters().waitForConsumer();
                } catch (InterruptedException ie) {
                    log.info("{} :  Thread was interrupted while waiting for the consumer.  " +
                            "Shutdown has probably been started", shardId);
                }
            }
        }

        private void callShutdownOnStrategy() {
            if (!getRecordsRetrievalStrategy.isShutdown()) {
                getRecordsRetrievalStrategy.shutdown();
            }
        }

        private void sleepBeforeNextCall() throws InterruptedException {
            if (lastSuccessfulCall == null) {
                return;
            }
            long timeSinceLastCall = Duration.between(lastSuccessfulCall, Instant.now()).abs().toMillis();
            if (timeSinceLastCall < idleMillisBetweenCalls) {
                Thread.sleep(idleMillisBetweenCalls - timeSinceLastCall);
            }
        }
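
        // Worked example for sleepBeforeNextCall above (illustrative): with
        // idleMillisBetweenCalls = 1500 and a last successful call 600 ms ago,
        // timeSinceLastCall = 600, so the daemon sleeps 1500 - 600 = 900 ms,
        // spacing successive GetRecords calls at least ~idleMillisBetweenCalls apart.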
    }

    private class PrefetchCounters {
        private long size = 0;
        private long byteSize = 0;

        public synchronized void added(final ProcessRecordsInput result) {
            size += getSize(result);
            byteSize += getByteSize(result);
        }

        public synchronized void removed(final ProcessRecordsInput result) {
            size -= getSize(result);
            byteSize -= getByteSize(result);
            this.notifyAll();
        }

        private long getSize(final ProcessRecordsInput result) {
            return result.records().size();
        }

        private long getByteSize(final ProcessRecordsInput result) {
            return result.records().stream().mapToLong(record -> record.data().limit()).sum();
        }

        public synchronized void waitForConsumer() throws InterruptedException {
            if (!shouldGetNewRecords()) {
                log.debug("{} : Queue is full waiting for consumer for {} ms", shardId, idleMillisBetweenCalls);
                this.wait(idleMillisBetweenCalls);
            }
        }

        public synchronized boolean shouldGetNewRecords() {
            if (log.isDebugEnabled()) {
                log.debug("{} : Current Prefetch Counter States: {}", shardId, this.toString());
            }
            return size < maxRecordsCount && byteSize < maxByteSize;
        }

        void reset() {
            size = 0;
            byteSize = 0;
        }

        @Override
        public String toString() {
            return String.format("{ Requests: %d, Records: %d, Bytes: %d }", publisherSession.prefetchRecordsQueue().size(), size,
                    byteSize);
        }
    }
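
    // Capacity gating is checked *before* each fetch, so the cache can overshoot each limit by
    // up to one batch. Worked example (illustrative): with maxRecordsCount = 30_000 and 29_999
    // records already cached, shouldGetNewRecords() is still true, and the next GetRecords call
    // may add up to maxRecordsPerCall more records before the counters block further fetches.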

}
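
Taken together, a typical lifecycle is: construct the publisher, start it from a checkpointed position, subscribe a Reactive Streams Subscriber, and ack each delivered batch. The sketch below exercises only this class's public surface under stated assumptions ("strategy", "executor", "metricsFactory" and "mySubscriber" are wired up elsewhere; in KCL the subscriber role is filled by the ShardConsumer machinery):

// Hedged end-to-end sketch, not the actual KCL wiring.
RecordsPublisher publisher = new PrefetchRecordsPublisher(
        3, 8 * 1024 * 1024, 30_000, 10_000,
        strategy, executor, 1_500L, metricsFactory,
        "ProcessTask", "shardId-000000000000");

// Begin prefetching; the daemon thread starts filling the queue.
publisher.start(ExtendedSequenceNumber.LATEST,
        InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST));

// Attach a subscriber; each request(n) adds demand and may immediately drain the queue.
publisher.subscribe(mySubscriber);

// ... later, on shutdown:
publisher.shutdown();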



