All Downloads are FREE. Search and download functionalities are using the official Maven repository.
Please wait. This can take a few minutes ...
Many resources are needed to download a project. Please understand that we have to compensate our server costs. Thank you in advance.
The project price is only $1.
You can buy this project and download/modify it how often you want.
io.debezium.connector.sqlserver.SqlServerChangeEventSourceCoordinator Maven / Gradle / Ivy
/*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.connector.sqlserver;
import java.time.Duration;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
import org.apache.kafka.connect.source.SourceConnector;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import io.debezium.config.CommonConnectorConfig;
import io.debezium.connector.common.CdcSourceTaskContext;
import io.debezium.pipeline.ChangeEventSourceCoordinator;
import io.debezium.pipeline.ErrorHandler;
import io.debezium.pipeline.EventDispatcher;
import io.debezium.pipeline.metrics.spi.ChangeEventSourceMetricsFactory;
import io.debezium.pipeline.notification.NotificationService;
import io.debezium.pipeline.signal.SignalProcessor;
import io.debezium.pipeline.source.spi.ChangeEventSource;
import io.debezium.pipeline.source.spi.ChangeEventSource.ChangeEventSourceContext;
import io.debezium.pipeline.source.spi.ChangeEventSourceFactory;
import io.debezium.pipeline.source.spi.SnapshotChangeEventSource;
import io.debezium.pipeline.spi.Offsets;
import io.debezium.pipeline.spi.SnapshotResult;
import io.debezium.schema.DatabaseSchema;
import io.debezium.util.Clock;
import io.debezium.util.LoggingContext;
import io.debezium.util.Metronome;
/**
* Coordinates one or more {@link ChangeEventSource}s and executes them in order. Extends the base
* {@link ChangeEventSourceCoordinator} to support snapshotting and streaming of multiple partitions.
*/
public class SqlServerChangeEventSourceCoordinator extends ChangeEventSourceCoordinator {
private static final Logger LOGGER = LoggerFactory.getLogger(SqlServerChangeEventSourceCoordinator.class);
private final Clock clock;
private final Duration pollInterval;
private final AtomicBoolean firstStreamingIterationCompletedSuccessfully = new AtomicBoolean(false);
public SqlServerChangeEventSourceCoordinator(Offsets previousOffsets, ErrorHandler errorHandler,
Class extends SourceConnector> connectorType,
CommonConnectorConfig connectorConfig,
ChangeEventSourceFactory changeEventSourceFactory,
ChangeEventSourceMetricsFactory changeEventSourceMetricsFactory,
EventDispatcher eventDispatcher,
DatabaseSchema> schema,
Clock clock,
SignalProcessor signalProcessor,
NotificationService notificationService) {
super(previousOffsets, errorHandler, connectorType, connectorConfig, changeEventSourceFactory,
changeEventSourceMetricsFactory, eventDispatcher, schema, signalProcessor, notificationService);
this.clock = clock;
this.pollInterval = connectorConfig.getPollInterval();
}
public boolean firstStreamingIterationCompletedSuccessfully() {
return firstStreamingIterationCompletedSuccessfully.get();
}
@Override
protected void executeChangeEventSources(CdcSourceTaskContext taskContext, SnapshotChangeEventSource snapshotSource,
Offsets previousOffsets,
AtomicReference previousLogContext,
ChangeEventSourceContext context)
throws InterruptedException {
Offsets streamingOffsets = Offsets.of(new HashMap<>());
for (Map.Entry entry : previousOffsets) {
SqlServerPartition partition = entry.getKey();
SqlServerOffsetContext previousOffset = entry.getValue();
previousLogContext.set(taskContext.configureLoggingContext("snapshot", partition));
SnapshotResult snapshotResult = doSnapshot(snapshotSource, context, partition, previousOffset);
if (snapshotResult.isCompletedOrSkipped()) {
streamingOffsets.getOffsets().put(partition, snapshotResult.getOffset());
if (previousOffsets.getOffsets().size() == 1) {
signalProcessor.setContext(snapshotResult.getOffset());
}
}
}
previousLogContext.set(taskContext.configureLoggingContext("streaming"));
// TODO: Determine how to do incremental snapshots with multiple partitions
for (Map.Entry entry : streamingOffsets) {
initStreamEvents(entry.getKey(), entry.getValue());
}
final Metronome metronome = Metronome.sleeper(pollInterval, clock);
LOGGER.info("Starting streaming");
while (context.isRunning()) {
boolean streamedEvents = false;
for (Map.Entry entry : streamingOffsets) {
SqlServerPartition partition = entry.getKey();
SqlServerOffsetContext previousOffset = entry.getValue();
previousLogContext.set(taskContext.configureLoggingContext("streaming", partition));
if (context.isRunning()) {
streamedEvents = streamingSource.executeIteration(context, partition, previousOffset);
}
}
if (!streamedEvents) {
metronome.pause();
}
if (errorHandler.getProducerThrowable() == null) {
firstStreamingIterationCompletedSuccessfully.set(true);
}
if (context.isPaused()) {
LOGGER.info("Streaming will now pause");
context.streamingPaused();
context.waitSnapshotCompletion();
LOGGER.info("Streaming resumed");
}
}
LOGGER.info("Finished streaming");
}
}