
io.debezium.connector.spanner.task.PartitionOffsetProvider Maven / Gradle / Ivy

/*
 * Copyright Debezium Authors.
 *
 * Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
 */
package io.debezium.connector.spanner.task;

import java.time.Instant;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.stream.Collectors;

import org.apache.kafka.connect.storage.OffsetStorageReader;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.google.cloud.Timestamp;

import io.debezium.connector.spanner.SpannerPartition;
import io.debezium.connector.spanner.context.offset.PartitionOffset;
import io.debezium.connector.spanner.kafka.internal.model.PartitionState;
import io.debezium.connector.spanner.metrics.MetricsEventPublisher;
import io.debezium.connector.spanner.metrics.event.OffsetReceivingTimeMetricEvent;

/**
 * Retrieves offsets from Kafka Connect
 * and publishes appropriate metrics
 */
public class PartitionOffsetProvider {
    private static final Logger LOGGER = LoggerFactory.getLogger(PartitionOffsetProvider.class);

    private final OffsetStorageReader offsetStorageReader;
    private final MetricsEventPublisher metricsEventPublisher;

    private final ExecutorService executor;

    public PartitionOffsetProvider(OffsetStorageReader offsetStorageReader, MetricsEventPublisher metricsEventPublisher) {
        this.offsetStorageReader = offsetStorageReader;
        this.metricsEventPublisher = metricsEventPublisher;
        this.executor = Executors.newCachedThreadPool();
    }

    public Timestamp getOffset(PartitionState token) {
        Map<String, String> spannerPartition = new SpannerPartition(token.getToken()).getSourcePartition();

        Map<String, Object> result = retrieveOffsetMap(spannerPartition);
        if (result == null) {
            LOGGER.warn("Token {} returning start timestamp because no offset was retrieved", token);
            return token.getStartTimestamp();
        }
        LOGGER.info("Successfully retrieved offset {} for token {}", result, token);
        return PartitionOffset.extractOffset(result);
    }

    public Map<String, String> getOffsetMap(PartitionState token) {

        Map<String, String> spannerPartition = new SpannerPartition(token.getToken()).getSourcePartition();
        Map<String, Object> result = retrieveOffsetMap(spannerPartition);

        if (result == null) {
            return Map.of();
        }
        return (Map<String, String>) (Map<?, ?>) result;
    }

    public Map<String, Timestamp> getOffsets(Collection<String> partitions) {
        Instant startTime = Instant.now();

        List<Map<String, String>> partitionsMapList = partitions.stream()
                .map(token -> new SpannerPartition(token).getSourcePartition())
                .collect(Collectors.toList());

        Map<Map<String, String>, Map<String, Object>> result = this.offsetStorageReader.offsets(partitionsMapList);

        if (result == null) {
            return Map.of();
        }

        metricsEventPublisher.publishMetricEvent(OffsetReceivingTimeMetricEvent.from(startTime));

        Map<String, Timestamp> map = new HashMap<>();

        for (Map.Entry<Map<String, String>, Map<String, Object>> entry : result.entrySet()) {
            map.put(SpannerPartition.extractToken(entry.getKey()),
                    PartitionOffset.extractOffset(entry.getValue()));
        }

        return map;
    }

    private Map<String, Object> retrieveOffsetMap(Map<String, String> spannerPartition) {
        Instant startTime = Instant.now();
        Map<String, Object> result = null;
        Future<Map<String, Object>> future = executor.submit(new ExecutorServiceCallable(offsetStorageReader, spannerPartition));
        try {
            result = future.get(5, TimeUnit.SECONDS);
        }
        catch (TimeoutException ex) {
            // the offset lookup did not complete within the timeout; callers fall back to their defaults
            LOGGER.error("Token {}, failed to retrieve offset in time", spannerPartition, ex);
        }
        catch (InterruptedException e) {
            // restore the interrupt flag so the task can shut down cleanly
            LOGGER.error("Token {}, interrupting PartitionOffsetProvider", spannerPartition, e);
            Thread.currentThread().interrupt();
        }
        catch (ExecutionException e) {
            // the offset lookup itself failed; log the cause and return null
            LOGGER.error("Token {}, failed to retrieve offset", spannerPartition, e);
        }
        finally {
            future.cancel(true); // make sure the lookup does not linger after a timeout or failure
        }
        metricsEventPublisher.publishMetricEvent(OffsetReceivingTimeMetricEvent.from(startTime));
        return result;
    }

    public static class ExecutorServiceCallable implements Callable<Map<String, Object>> {

        private OffsetStorageReader offsetStorageReader;
        private Map<String, String> spannerPartition;

        public ExecutorServiceCallable(OffsetStorageReader offsetStorageReader, Map<String, String> spannerPartition) {
            this.offsetStorageReader = offsetStorageReader;
            this.spannerPartition = spannerPartition;
        }

        @Override
        public Map<String, Object> call() throws Exception {
            try {
                return this.offsetStorageReader.offset(spannerPartition);
            }
            catch (Exception e) {
                LOGGER.error("Offsetstoragereader throwing exception", e);
                throw e;
            }
        }
    }

}
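For context, the sketch below shows one way this provider could be wired up from a Kafka Connect source task. It is illustrative only: it assumes the task's SourceTaskContext supplies the OffsetStorageReader (a standard Kafka Connect API), and that a MetricsEventPublisher and the relevant PartitionState instances are already available in the surrounding connector code.

import java.util.List;
import java.util.Map;

import org.apache.kafka.connect.source.SourceTaskContext;

import com.google.cloud.Timestamp;

import io.debezium.connector.spanner.kafka.internal.model.PartitionState;
import io.debezium.connector.spanner.metrics.MetricsEventPublisher;
import io.debezium.connector.spanner.task.PartitionOffsetProvider;

// Illustrative usage sketch; not part of the connector sources.
class PartitionOffsetProviderUsageSketch {

    Timestamp resolveRestartPosition(SourceTaskContext context,
                                     MetricsEventPublisher metricsEventPublisher,
                                     PartitionState partitionState) {
        // Build the provider from the offset reader exposed by Kafka Connect.
        PartitionOffsetProvider provider = new PartitionOffsetProvider(context.offsetStorageReader(), metricsEventPublisher);

        // Single-partition lookup: falls back to the partition's start timestamp
        // when no committed offset exists yet.
        return provider.getOffset(partitionState);
    }

    Map<String, Timestamp> resolveRestartPositions(PartitionOffsetProvider provider, List<String> partitionTokens) {
        // Bulk lookup keyed by change stream partition token.
        return provider.getOffsets(partitionTokens);
    }
}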



