package com.snowflake.kafka.connector.internal.streaming;
import static com.snowflake.kafka.connector.SnowflakeSinkConnectorConfig.ENABLE_CHANNEL_OFFSET_TOKEN_MIGRATION_CONFIG;
import static com.snowflake.kafka.connector.SnowflakeSinkConnectorConfig.ENABLE_CHANNEL_OFFSET_TOKEN_MIGRATION_DEFAULT;
import static com.snowflake.kafka.connector.SnowflakeSinkConnectorConfig.ERRORS_DEAD_LETTER_QUEUE_TOPIC_NAME_CONFIG;
import static com.snowflake.kafka.connector.SnowflakeSinkConnectorConfig.ERRORS_TOLERANCE_CONFIG;
import static com.snowflake.kafka.connector.internal.streaming.StreamingUtils.DURATION_BETWEEN_GET_OFFSET_TOKEN_RETRY;
import static com.snowflake.kafka.connector.internal.streaming.StreamingUtils.MAX_GET_OFFSET_TOKEN_RETRIES;
import static java.time.temporal.ChronoUnit.SECONDS;
import static org.apache.kafka.common.record.TimestampType.NO_TIMESTAMP_TYPE;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.MoreObjects;
import com.google.common.base.Preconditions;
import com.google.common.base.Strings;
import com.google.common.collect.ImmutableMap;
import com.snowflake.kafka.connector.SnowflakeSinkConnectorConfig;
import com.snowflake.kafka.connector.Utils;
import com.snowflake.kafka.connector.dlq.KafkaRecordErrorReporter;
import com.snowflake.kafka.connector.internal.BufferThreshold;
import com.snowflake.kafka.connector.internal.KCLogger;
import com.snowflake.kafka.connector.internal.SnowflakeConnectionService;
import com.snowflake.kafka.connector.internal.SnowflakeErrors;
import com.snowflake.kafka.connector.internal.SnowflakeKafkaConnectorException;
import com.snowflake.kafka.connector.internal.metrics.MetricsJmxReporter;
import com.snowflake.kafka.connector.internal.streaming.channel.TopicPartitionChannel;
import com.snowflake.kafka.connector.internal.streaming.schemaevolution.InsertErrorMapper;
import com.snowflake.kafka.connector.internal.streaming.schemaevolution.SchemaEvolutionService;
import com.snowflake.kafka.connector.internal.streaming.schemaevolution.SchemaEvolutionTargetItems;
import com.snowflake.kafka.connector.internal.streaming.telemetry.SnowflakeTelemetryChannelCreation;
import com.snowflake.kafka.connector.internal.streaming.telemetry.SnowflakeTelemetryChannelStatus;
import com.snowflake.kafka.connector.internal.telemetry.SnowflakeTelemetryService;
import com.snowflake.kafka.connector.records.RecordService;
import com.snowflake.kafka.connector.records.RecordServiceFactory;
import com.snowflake.kafka.connector.records.SnowflakeJsonSchema;
import com.snowflake.kafka.connector.records.SnowflakeRecordContent;
import dev.failsafe.Failsafe;
import dev.failsafe.Fallback;
import dev.failsafe.RetryPolicy;
import java.io.ByteArrayOutputStream;
import java.io.ObjectOutputStream;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.atomic.AtomicLong;
import java.util.stream.Collectors;
import net.snowflake.ingest.streaming.*;
import net.snowflake.ingest.utils.SFException;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.errors.ConnectException;
import org.apache.kafka.connect.errors.DataException;
import org.apache.kafka.connect.sink.SinkRecord;
import org.apache.kafka.connect.sink.SinkTaskContext;
public class DirectTopicPartitionChannel implements TopicPartitionChannel {
private static final KCLogger LOGGER = new KCLogger(DirectTopicPartitionChannel.class.getName());
// Channel used to communicate with the Streaming Ingest insertRows API.
// This is non-final because we might decide to get a new instance of the channel (e.g. after it is invalidated).
private SnowflakeStreamingIngestChannel channel;
// -------- private final fields -------- //
// This offset represents the data persisted in Snowflake. More specifically it is the Snowflake
// offset determined from the insertRows API call. It is set after calling the fetchOffsetToken
// API for this channel
private final AtomicLong offsetPersistedInSnowflake =
new AtomicLong(NO_OFFSET_TOKEN_REGISTERED_IN_SNOWFLAKE);
// This offset represents the data buffered in KC. More specifically it is the KC offset to ensure
// exactly once functionality. On the creation it is set to the latest committed token in
// Snowflake (see offsetPersistedInSnowflake) and updated on each new row from KC.
private final AtomicLong processedOffset =
new AtomicLong(NO_OFFSET_TOKEN_REGISTERED_IN_SNOWFLAKE);
// This offset would not be required for a buffer-less channel, but we keep it so the buffered
// and non-buffered channel versions stay compatible.
private final AtomicLong currentConsumerGroupOffset =
new AtomicLong(NO_OFFSET_TOKEN_REGISTERED_IN_SNOWFLAKE);
// Indicates whether we need to skip and discard any leftover rows in the current batch. This can
// happen when the channel gets invalidated and reset; anything left in the buffer should then be
// skipped.
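// For example, when insertRows throws an SFException the fallback logic reopens the channel and
// rewinds the Kafka offset; any rows already polled for the current batch must then be discarded.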
private boolean needToSkipCurrentBatch = false;
private final SnowflakeStreamingIngestClient streamingIngestClient;
// Topic partition Object from connect consisting of topic and partition
private final TopicPartition topicPartition;
/* Channel Name is computed from topic and partition */
private final String channelNameFormatV1;
/* table is required for opening the channel */
private final String tableName;
/* Error handling, DB, schema, Snowflake URL and other snowflake specific connector properties are defined here. */
private final Map<String, String> sfConnectorConfig;
/* Responsible for converting records to Json */
private final RecordService recordService;
/* Responsible for returning errors to DLQ if records have failed to be ingested. */
private final KafkaRecordErrorReporter kafkaRecordErrorReporter;
private final SchemaEvolutionService schemaEvolutionService;
/**
* Available from {@link org.apache.kafka.connect.sink.SinkTask} which has access to various
* utility methods.
*/
private final SinkTaskContext sinkTaskContext;
/* Error related properties */
// If set to true, we will send records to the DLQ, provided the DLQ name is valid.
private final boolean errorTolerance;
// Whether to log errors to log file.
private final boolean logErrors;
// Set to false if the DLQ topic is null or empty; true if it is a valid string in the config.
private final boolean isDLQTopicSet;
// Whether schematization has been enabled.
private final boolean enableSchematization;
// Whether schema evolution could be done on this channel
private final boolean enableSchemaEvolution;
// Reference to the Snowflake connection service
private final SnowflakeConnectionService conn;
private final SnowflakeTelemetryChannelStatus snowflakeTelemetryChannelStatus;
private final InsertErrorMapper insertErrorMapper;
/**
* Used to send telemetry to Snowflake. Currently, the TelemetryClient is created from a Snowflake
* Connection object, i.e. it is not a session-less client.
*/
private final SnowflakeTelemetryService telemetryServiceV2;
/** Testing only, initialize TopicPartitionChannel without the connection service */
@VisibleForTesting
public DirectTopicPartitionChannel(
SnowflakeStreamingIngestClient streamingIngestClient,
TopicPartition topicPartition,
final String channelNameFormatV1,
final String tableName,
final BufferThreshold streamingBufferThreshold,
final Map<String, String> sfConnectorConfig,
KafkaRecordErrorReporter kafkaRecordErrorReporter,
SinkTaskContext sinkTaskContext,
SnowflakeConnectionService conn,
SnowflakeTelemetryService telemetryService,
SchemaEvolutionService schemaEvolutionService,
InsertErrorMapper insertErrorMapper) {
this(
streamingIngestClient,
topicPartition,
channelNameFormatV1,
tableName,
false, /* No schema evolution permission */
streamingBufferThreshold,
sfConnectorConfig,
kafkaRecordErrorReporter,
sinkTaskContext,
conn,
RecordServiceFactory.createRecordService(
false, Utils.isSchematizationEnabled(sfConnectorConfig)),
telemetryService,
false,
null,
schemaEvolutionService,
insertErrorMapper);
}
/**
* @param streamingIngestClient client created specifically for this task
* @param topicPartition topic partition corresponding to this Streaming Channel
* (TopicPartitionChannel)
* @param channelNameFormatV1 channel Name which is deterministic for topic and partition
* @param tableName table to ingest in snowflake
* @param hasSchemaEvolutionPermission if the role has permission to perform schema evolution on
* the table
* @param streamingBufferThreshold bytes, count of records and flush time thresholds.
* @param sfConnectorConfig configuration set for snowflake connector
* @param kafkaRecordErrorReporter kafka error reporter for sending records to DLQ
* @param sinkTaskContext context on Kafka Connect's runtime
* @param conn the snowflake connection service
* @param recordService record service for processing incoming records from Kafka
* @param telemetryService Telemetry Service which includes the Telemetry Client, sends Json data
* to Snowflake
* @param insertErrorMapper Mapper to map insert errors to schema evolution items
*/
public DirectTopicPartitionChannel(
SnowflakeStreamingIngestClient streamingIngestClient,
TopicPartition topicPartition,
final String channelNameFormatV1,
final String tableName,
boolean hasSchemaEvolutionPermission,
final BufferThreshold streamingBufferThreshold,
final Map<String, String> sfConnectorConfig,
KafkaRecordErrorReporter kafkaRecordErrorReporter,
SinkTaskContext sinkTaskContext,
SnowflakeConnectionService conn,
RecordService recordService,
SnowflakeTelemetryService telemetryService,
boolean enableCustomJMXMonitoring,
MetricsJmxReporter metricsJmxReporter,
SchemaEvolutionService schemaEvolutionService,
InsertErrorMapper insertErrorMapper) {
final long startTime = System.currentTimeMillis();
this.streamingIngestClient = Preconditions.checkNotNull(streamingIngestClient);
Preconditions.checkState(!streamingIngestClient.isClosed());
this.topicPartition = Preconditions.checkNotNull(topicPartition);
this.channelNameFormatV1 = Preconditions.checkNotNull(channelNameFormatV1);
this.tableName = Preconditions.checkNotNull(tableName);
this.sfConnectorConfig = Preconditions.checkNotNull(sfConnectorConfig);
this.kafkaRecordErrorReporter = Preconditions.checkNotNull(kafkaRecordErrorReporter);
this.sinkTaskContext = Preconditions.checkNotNull(sinkTaskContext);
this.conn = conn;
this.recordService = recordService;
this.telemetryServiceV2 = Preconditions.checkNotNull(telemetryService);
/* Error properties */
this.errorTolerance = StreamingUtils.tolerateErrors(this.sfConnectorConfig);
this.logErrors = StreamingUtils.logErrors(this.sfConnectorConfig);
this.isDLQTopicSet =
!Strings.isNullOrEmpty(StreamingUtils.getDlqTopicName(this.sfConnectorConfig));
/* Schematization related properties */
this.enableSchematization = Utils.isSchematizationEnabled(this.sfConnectorConfig);
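// Schema evolution is attempted only when schematization is enabled in the config AND the
// connector role has permission to evolve the target table's schema.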
this.enableSchemaEvolution = this.enableSchematization && hasSchemaEvolutionPermission;
this.schemaEvolutionService = schemaEvolutionService;
if (isEnableChannelOffsetMigration(sfConnectorConfig)) {
/* Channel Name format V2 is computed from connector name, topic and partition */
final String channelNameFormatV2 =
TopicPartitionChannel.generateChannelNameFormatV2(
this.channelNameFormatV1, this.conn.getConnectorName());
conn.migrateStreamingChannelOffsetToken(
this.tableName, channelNameFormatV2, this.channelNameFormatV1);
}
// Open channel and reset the offset in kafka
this.channel = Preconditions.checkNotNull(openChannelForTable());
final long lastCommittedOffsetToken = fetchOffsetTokenWithRetry();
this.offsetPersistedInSnowflake.set(lastCommittedOffsetToken);
this.processedOffset.set(lastCommittedOffsetToken);
// setup telemetry and metrics
String connectorName =
conn == null || conn.getConnectorName() == null || conn.getConnectorName().isEmpty()
? "default_connector_name"
: conn.getConnectorName();
this.snowflakeTelemetryChannelStatus =
new SnowflakeTelemetryChannelStatus(
tableName,
connectorName,
channelNameFormatV1,
startTime,
enableCustomJMXMonitoring,
metricsJmxReporter,
this.offsetPersistedInSnowflake,
this.processedOffset,
this.currentConsumerGroupOffset);
this.telemetryServiceV2.reportKafkaPartitionStart(
new SnowflakeTelemetryChannelCreation(this.tableName, this.channelNameFormatV1, startTime));
this.insertErrorMapper = insertErrorMapper;
if (lastCommittedOffsetToken != NO_OFFSET_TOKEN_REGISTERED_IN_SNOWFLAKE) {
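// The offset token is the last offset already persisted in Snowflake, so instruct Kafka Connect
// to resume this partition from the next offset (token + 1).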
this.sinkTaskContext.offset(this.topicPartition, lastCommittedOffsetToken + 1L);
} else {
LOGGER.info(
"TopicPartitionChannel:{}, offset token is NULL, will rely on Kafka to send us the"
+ " correct offset instead",
this.getChannelNameFormatV1());
}
}
/**
* Checks if the configuration provided in Snowflake Kafka Connect has set {@link
* SnowflakeSinkConnectorConfig#ENABLE_CHANNEL_OFFSET_TOKEN_MIGRATION_CONFIG} to any value. If not
* set, it fetches the default value.
*
* <p>If the returned value is false, the system function for channel offset migration will not be
* called and the channel name will use the V1 format.
*
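* <p>For example, a connector configuration could enable the migration explicitly. The property
* key below is shown for illustration only; the authoritative name is {@link
* SnowflakeSinkConnectorConfig#ENABLE_CHANNEL_OFFSET_TOKEN_MIGRATION_CONFIG}:
*
* <pre>{@code
* "enable.streaming.channel.offset.migration": "true"
* }</pre>
*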
* @param sfConnectorConfig customer provided json config
* @return true if enabled, false otherwise
*/
private boolean isEnableChannelOffsetMigration(Map<String, String> sfConnectorConfig) {
boolean isEnableChannelOffsetMigration =
Boolean.parseBoolean(
sfConnectorConfig.getOrDefault(
SnowflakeSinkConnectorConfig.ENABLE_CHANNEL_OFFSET_TOKEN_MIGRATION_CONFIG,
Boolean.toString(ENABLE_CHANNEL_OFFSET_TOKEN_MIGRATION_DEFAULT)));
if (!isEnableChannelOffsetMigration) {
LOGGER.info(
"Config:{} is disabled for connector:{}",
ENABLE_CHANNEL_OFFSET_TOKEN_MIGRATION_CONFIG,
conn.getConnectorName());
}
return isEnableChannelOffsetMigration;
}
@Override
public void insertRecord(SinkRecord kafkaSinkRecord, boolean isFirstRowPerPartitionInBatch) {
final long currentOffsetPersistedInSnowflake = this.offsetPersistedInSnowflake.get();
final long currentProcessedOffset = this.processedOffset.get();
// for backwards compatibility - set the consumer offset to be the first one received from kafka
if (currentConsumerGroupOffset.get() == NO_OFFSET_TOKEN_REGISTERED_IN_SNOWFLAKE) {
this.currentConsumerGroupOffset.set(kafkaSinkRecord.kafkaOffset());
}
// Reset the value if it's a new batch
if (isFirstRowPerPartitionInBatch) {
needToSkipCurrentBatch = false;
}
// Simply skip inserting into the buffer if the row should be ignored after channel reset
if (needToSkipCurrentBatch) {
LOGGER.info(
"Ignore inserting offset:{} for channel:{} because we recently reset offset in"
+ " Kafka. currentProcessedOffset:{}",
kafkaSinkRecord.kafkaOffset(),
this.getChannelNameFormatV1(),
currentProcessedOffset);
return;
}
// Accept the incoming record only if we don't have a valid offset token at server side, or the
// incoming record offset is 1 + the processed offset
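// For example, if the processed offset is 41, a record at offset 42 (or any higher offset, e.g.
// after a rebalance gap) is accepted, while a replayed record at offset 41 or lower is dropped.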
if (currentProcessedOffset == NO_OFFSET_TOKEN_REGISTERED_IN_SNOWFLAKE
|| kafkaSinkRecord.kafkaOffset() >= currentProcessedOffset + 1) {
transformAndSend(kafkaSinkRecord);
} else {
LOGGER.warn(
"Channel {} - skipping current record - expected offset {} but received {}. The current"
+ " offset stored in Snowflake: {}",
this.getChannelNameFormatV1(),
currentProcessedOffset,
kafkaSinkRecord.kafkaOffset(),
currentOffsetPersistedInSnowflake);
}
}
private boolean shouldConvertContent(final Object content) {
return content != null && !(content instanceof SnowflakeRecordContent);
}
/**
* This would always return false for the streaming ingest use case since the isBroken field is
* never set. isBroken is set only when using custom Snowflake converters and the content was not
* JSON serializable.
*
* <p>For community converters, the Kafka record will not be sent to the Kafka connector if the
* record is not serializable.
*/
private boolean isRecordBroken(final SinkRecord record) {
return isContentBroken(record.value()) || isContentBroken(record.key());
}
private boolean isContentBroken(final Object content) {
return content != null && ((SnowflakeRecordContent) content).isBroken();
}
private SinkRecord handleNativeRecord(SinkRecord record, boolean isKey) {
SnowflakeRecordContent newSFContent;
Schema schema = isKey ? record.keySchema() : record.valueSchema();
Object content = isKey ? record.key() : record.value();
try {
newSFContent = new SnowflakeRecordContent(schema, content, true);
} catch (Exception e) {
LOGGER.error("Native content parser error:\n{}", e.getMessage());
try {
// try to serialize this object and send that as broken record
ByteArrayOutputStream out = new ByteArrayOutputStream();
ObjectOutputStream os = new ObjectOutputStream(out);
os.writeObject(content);
newSFContent = new SnowflakeRecordContent(out.toByteArray());
} catch (Exception serializeError) {
LOGGER.error(
"Failed to convert broken native record to byte data:\n{}",
serializeError.getMessage());
throw e;
}
}
// create new sinkRecord
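// Only the side (key or value) that was re-parsed is replaced with the SnowflakeRecordContent;
// the other side is carried over from the original record unchanged.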
Schema keySchema = isKey ? new SnowflakeJsonSchema() : record.keySchema();
Object keyContent = isKey ? newSFContent : record.key();
Schema valueSchema = isKey ? record.valueSchema() : new SnowflakeJsonSchema();
Object valueContent = isKey ? record.value() : newSFContent;
return new SinkRecord(
record.topic(),
record.kafkaPartition(),
keySchema,
keyContent,
valueSchema,
valueContent,
record.kafkaOffset(),
record.timestamp(),
record.timestampType(),
record.headers());
}
// --------------- BUFFER FLUSHING LOGIC --------------- //
@Override
@Deprecated
public void insertBufferedRecordsIfFlushTimeThresholdReached() {
// This is just a leftover from the buffered channel implementation; it is not needed here because
// the buffer was removed in this class.
// TODO: remove this method in the future
}
private void transformAndSend(SinkRecord kafkaSinkRecord) {
try {
Map<String, Object> transformedRecord = transformDataBeforeSending(kafkaSinkRecord);
if (!transformedRecord.isEmpty()) {
InsertValidationResponse response =
insertRowWithFallback(transformedRecord, kafkaSinkRecord.kafkaOffset());
this.processedOffset.set(kafkaSinkRecord.kafkaOffset());
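// Note: the processed offset is advanced even if the response carries row-level errors; the
// error handling below decides whether those rows are reported to the DLQ or treated as fatal,
// based on the error tolerance settings.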
if (response.hasErrors()) {
LOGGER.warn(
"insertRow for channel:{} resulted in errors:{},",
this.getChannelNameFormatV1(),
response.hasErrors());
handleInsertRowFailure(response.getInsertErrors(), kafkaSinkRecord);
}
}
} catch (TopicPartitionChannelInsertionException ex) {
// Suppressing the exception because other channels might still continue to ingest
LOGGER.warn(
String.format(
"[INSERT_BUFFERED_RECORDS] Failure inserting rows for channel:%s",
this.getChannelNameFormatV1()),
ex);
}
}
/**
* Uses {@link Fallback} API to reopen the channel if insertRows throws {@link SFException}.
*
* <p>We have deliberately not performed retries on insertRows because it might slow down overall
* ingestion and introduce lags in committing offsets to Kafka.
*
* <p>Note that the insertRows API does perform channel validation, which might throw an
* SFException if the channel is invalidated.
*
* <p>It can also return errors ({@link
* net.snowflake.ingest.streaming.InsertValidationResponse.InsertError}) as part of the response
* inside {@link InsertValidationResponse}.
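*
* <p>A minimal sketch of the Failsafe fallback pattern this method relies on (simplified;
* {@code reopenChannelAndResetOffset} is a hypothetical stand-in for the actual fallback handler
* configured below):
*
* <pre>{@code
* Fallback<Object> reopenChannelFallback =
*     Fallback.of(() -> reopenChannelAndResetOffset());
* InsertValidationResponse response =
*     (InsertValidationResponse)
*         Failsafe.with(reopenChannelFallback)
*             .get(() -> this.channel.insertRow(transformedRecord, Long.toString(offset)));
* }</pre>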
*
* @return the {@link InsertValidationResponse} returned by the insertRows call
*/
private InsertValidationResponse insertRowWithFallback(
Map<String, Object> transformedRecord, long offset) {
Fallback