/*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package software.amazon.awssdk.services.glue.model;
import java.io.Serializable;
import java.time.Instant;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.function.BiConsumer;
import java.util.function.Function;
import software.amazon.awssdk.annotations.Generated;
import software.amazon.awssdk.core.SdkField;
import software.amazon.awssdk.core.SdkPojo;
import software.amazon.awssdk.core.protocol.MarshallLocation;
import software.amazon.awssdk.core.protocol.MarshallingType;
import software.amazon.awssdk.core.traits.LocationTrait;
import software.amazon.awssdk.core.traits.TimestampFormatTrait;
import software.amazon.awssdk.utils.ToString;
import software.amazon.awssdk.utils.builder.CopyableBuilder;
import software.amazon.awssdk.utils.builder.ToCopyableBuilder;
/**
*
* Additional options for streaming.
*
*/
@Generated("software.amazon.awssdk:codegen")
public final class KafkaStreamingSourceOptions implements SdkPojo, Serializable,
ToCopyableBuilder<KafkaStreamingSourceOptions.Builder, KafkaStreamingSourceOptions> {
private static final SdkField<String> BOOTSTRAP_SERVERS_FIELD = SdkField.<String> builder(MarshallingType.STRING)
.memberName("BootstrapServers").getter(getter(KafkaStreamingSourceOptions::bootstrapServers))
.setter(setter(Builder::bootstrapServers))
.traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("BootstrapServers").build()).build();
private static final SdkField<String> SECURITY_PROTOCOL_FIELD = SdkField.<String> builder(MarshallingType.STRING)
.memberName("SecurityProtocol").getter(getter(KafkaStreamingSourceOptions::securityProtocol))
.setter(setter(Builder::securityProtocol))
.traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("SecurityProtocol").build()).build();
private static final SdkField<String> CONNECTION_NAME_FIELD = SdkField.<String> builder(MarshallingType.STRING)
.memberName("ConnectionName").getter(getter(KafkaStreamingSourceOptions::connectionName))
.setter(setter(Builder::connectionName))
.traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("ConnectionName").build()).build();
private static final SdkField<String> TOPIC_NAME_FIELD = SdkField.<String> builder(MarshallingType.STRING)
.memberName("TopicName").getter(getter(KafkaStreamingSourceOptions::topicName)).setter(setter(Builder::topicName))
.traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("TopicName").build()).build();
private static final SdkField<String> ASSIGN_FIELD = SdkField.<String> builder(MarshallingType.STRING).memberName("Assign")
.getter(getter(KafkaStreamingSourceOptions::assign)).setter(setter(Builder::assign))
.traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("Assign").build()).build();
private static final SdkField<String> SUBSCRIBE_PATTERN_FIELD = SdkField.<String> builder(MarshallingType.STRING)
.memberName("SubscribePattern").getter(getter(KafkaStreamingSourceOptions::subscribePattern))
.setter(setter(Builder::subscribePattern))
.traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("SubscribePattern").build()).build();
private static final SdkField<String> CLASSIFICATION_FIELD = SdkField.<String> builder(MarshallingType.STRING)
.memberName("Classification").getter(getter(KafkaStreamingSourceOptions::classification))
.setter(setter(Builder::classification))
.traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("Classification").build()).build();
private static final SdkField<String> DELIMITER_FIELD = SdkField.<String> builder(MarshallingType.STRING)
.memberName("Delimiter").getter(getter(KafkaStreamingSourceOptions::delimiter)).setter(setter(Builder::delimiter))
.traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("Delimiter").build()).build();
private static final SdkField<String> STARTING_OFFSETS_FIELD = SdkField.<String> builder(MarshallingType.STRING)
.memberName("StartingOffsets").getter(getter(KafkaStreamingSourceOptions::startingOffsets))
.setter(setter(Builder::startingOffsets))
.traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("StartingOffsets").build()).build();
private static final SdkField<String> ENDING_OFFSETS_FIELD = SdkField.<String> builder(MarshallingType.STRING)
.memberName("EndingOffsets").getter(getter(KafkaStreamingSourceOptions::endingOffsets))
.setter(setter(Builder::endingOffsets))
.traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("EndingOffsets").build()).build();
private static final SdkField<Long> POLL_TIMEOUT_MS_FIELD = SdkField.<Long> builder(MarshallingType.LONG)
.memberName("PollTimeoutMs").getter(getter(KafkaStreamingSourceOptions::pollTimeoutMs))
.setter(setter(Builder::pollTimeoutMs))
.traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("PollTimeoutMs").build()).build();
private static final SdkField<Integer> NUM_RETRIES_FIELD = SdkField.<Integer> builder(MarshallingType.INTEGER)
.memberName("NumRetries").getter(getter(KafkaStreamingSourceOptions::numRetries)).setter(setter(Builder::numRetries))
.traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("NumRetries").build()).build();
private static final SdkField<Long> RETRY_INTERVAL_MS_FIELD = SdkField.<Long> builder(MarshallingType.LONG)
.memberName("RetryIntervalMs").getter(getter(KafkaStreamingSourceOptions::retryIntervalMs))
.setter(setter(Builder::retryIntervalMs))
.traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("RetryIntervalMs").build()).build();
private static final SdkField<Long> MAX_OFFSETS_PER_TRIGGER_FIELD = SdkField.<Long> builder(MarshallingType.LONG)
.memberName("MaxOffsetsPerTrigger").getter(getter(KafkaStreamingSourceOptions::maxOffsetsPerTrigger))
.setter(setter(Builder::maxOffsetsPerTrigger))
.traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("MaxOffsetsPerTrigger").build())
.build();
private static final SdkField<Integer> MIN_PARTITIONS_FIELD = SdkField.<Integer> builder(MarshallingType.INTEGER)
.memberName("MinPartitions").getter(getter(KafkaStreamingSourceOptions::minPartitions))
.setter(setter(Builder::minPartitions))
.traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("MinPartitions").build()).build();
private static final SdkField<Boolean> INCLUDE_HEADERS_FIELD = SdkField.<Boolean> builder(MarshallingType.BOOLEAN)
.memberName("IncludeHeaders").getter(getter(KafkaStreamingSourceOptions::includeHeaders))
.setter(setter(Builder::includeHeaders))
.traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("IncludeHeaders").build()).build();
private static final SdkField<String> ADD_RECORD_TIMESTAMP_FIELD = SdkField.<String> builder(MarshallingType.STRING)
.memberName("AddRecordTimestamp").getter(getter(KafkaStreamingSourceOptions::addRecordTimestamp))
.setter(setter(Builder::addRecordTimestamp))
.traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("AddRecordTimestamp").build())
.build();
private static final SdkField<String> EMIT_CONSUMER_LAG_METRICS_FIELD = SdkField.<String> builder(MarshallingType.STRING)
.memberName("EmitConsumerLagMetrics").getter(getter(KafkaStreamingSourceOptions::emitConsumerLagMetrics))
.setter(setter(Builder::emitConsumerLagMetrics))
.traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("EmitConsumerLagMetrics").build())
.build();
private static final SdkField<Instant> STARTING_TIMESTAMP_FIELD = SdkField
.<Instant> builder(MarshallingType.INSTANT)
.memberName("StartingTimestamp")
.getter(getter(KafkaStreamingSourceOptions::startingTimestamp))
.setter(setter(Builder::startingTimestamp))
.traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("StartingTimestamp").build(),
TimestampFormatTrait.create(TimestampFormatTrait.Format.ISO_8601)).build();
private static final List<SdkField<?>> SDK_FIELDS = Collections.unmodifiableList(Arrays.asList(BOOTSTRAP_SERVERS_FIELD,
SECURITY_PROTOCOL_FIELD, CONNECTION_NAME_FIELD, TOPIC_NAME_FIELD, ASSIGN_FIELD, SUBSCRIBE_PATTERN_FIELD,
CLASSIFICATION_FIELD, DELIMITER_FIELD, STARTING_OFFSETS_FIELD, ENDING_OFFSETS_FIELD, POLL_TIMEOUT_MS_FIELD,
NUM_RETRIES_FIELD, RETRY_INTERVAL_MS_FIELD, MAX_OFFSETS_PER_TRIGGER_FIELD, MIN_PARTITIONS_FIELD,
INCLUDE_HEADERS_FIELD, ADD_RECORD_TIMESTAMP_FIELD, EMIT_CONSUMER_LAG_METRICS_FIELD, STARTING_TIMESTAMP_FIELD));
private static final Map<String, SdkField<?>> SDK_NAME_TO_FIELD = Collections
.unmodifiableMap(new HashMap<String, SdkField<?>>() {
{
put("BootstrapServers", BOOTSTRAP_SERVERS_FIELD);
put("SecurityProtocol", SECURITY_PROTOCOL_FIELD);
put("ConnectionName", CONNECTION_NAME_FIELD);
put("TopicName", TOPIC_NAME_FIELD);
put("Assign", ASSIGN_FIELD);
put("SubscribePattern", SUBSCRIBE_PATTERN_FIELD);
put("Classification", CLASSIFICATION_FIELD);
put("Delimiter", DELIMITER_FIELD);
put("StartingOffsets", STARTING_OFFSETS_FIELD);
put("EndingOffsets", ENDING_OFFSETS_FIELD);
put("PollTimeoutMs", POLL_TIMEOUT_MS_FIELD);
put("NumRetries", NUM_RETRIES_FIELD);
put("RetryIntervalMs", RETRY_INTERVAL_MS_FIELD);
put("MaxOffsetsPerTrigger", MAX_OFFSETS_PER_TRIGGER_FIELD);
put("MinPartitions", MIN_PARTITIONS_FIELD);
put("IncludeHeaders", INCLUDE_HEADERS_FIELD);
put("AddRecordTimestamp", ADD_RECORD_TIMESTAMP_FIELD);
put("EmitConsumerLagMetrics", EMIT_CONSUMER_LAG_METRICS_FIELD);
put("StartingTimestamp", STARTING_TIMESTAMP_FIELD);
}
});
private static final long serialVersionUID = 1L;
private final String bootstrapServers;
private final String securityProtocol;
private final String connectionName;
private final String topicName;
private final String assign;
private final String subscribePattern;
private final String classification;
private final String delimiter;
private final String startingOffsets;
private final String endingOffsets;
private final Long pollTimeoutMs;
private final Integer numRetries;
private final Long retryIntervalMs;
private final Long maxOffsetsPerTrigger;
private final Integer minPartitions;
private final Boolean includeHeaders;
private final String addRecordTimestamp;
private final String emitConsumerLagMetrics;
private final Instant startingTimestamp;
private KafkaStreamingSourceOptions(BuilderImpl builder) {
this.bootstrapServers = builder.bootstrapServers;
this.securityProtocol = builder.securityProtocol;
this.connectionName = builder.connectionName;
this.topicName = builder.topicName;
this.assign = builder.assign;
this.subscribePattern = builder.subscribePattern;
this.classification = builder.classification;
this.delimiter = builder.delimiter;
this.startingOffsets = builder.startingOffsets;
this.endingOffsets = builder.endingOffsets;
this.pollTimeoutMs = builder.pollTimeoutMs;
this.numRetries = builder.numRetries;
this.retryIntervalMs = builder.retryIntervalMs;
this.maxOffsetsPerTrigger = builder.maxOffsetsPerTrigger;
this.minPartitions = builder.minPartitions;
this.includeHeaders = builder.includeHeaders;
this.addRecordTimestamp = builder.addRecordTimestamp;
this.emitConsumerLagMetrics = builder.emitConsumerLagMetrics;
this.startingTimestamp = builder.startingTimestamp;
}
/**
 *
 * A list of bootstrap server URLs, for example, as
 * <code>b-1.vpc-test-2.o4q88o.c6.kafka.us-east-1.amazonaws.com:9094</code>. This option must be specified in the
 * API call or defined in the table metadata in the Data Catalog.
 *
 *
 * @return A list of bootstrap server URLs, for example, as
 * <code>b-1.vpc-test-2.o4q88o.c6.kafka.us-east-1.amazonaws.com:9094</code>. This option must be specified
 * in the API call or defined in the table metadata in the Data Catalog.
 */
public final String bootstrapServers() {
return bootstrapServers;
}
/**
 *
 * The protocol used to communicate with brokers. The possible values are <code>"SSL"</code> or
 * <code>"PLAINTEXT"</code>.
 *
 *
 * @return The protocol used to communicate with brokers. The possible values are <code>"SSL"</code> or
 * <code>"PLAINTEXT"</code>.
 */
public final String securityProtocol() {
return securityProtocol;
}
/**
*
* The name of the connection.
*
*
* @return The name of the connection.
*/
public final String connectionName() {
return connectionName;
}
/**
 *
 * The topic name as specified in Apache Kafka. You must specify at least one of <code>"topicName"</code>,
 * <code>"assign"</code> or <code>"subscribePattern"</code>.
 *
 *
 * @return The topic name as specified in Apache Kafka. You must specify at least one of <code>"topicName"</code>,
 * <code>"assign"</code> or <code>"subscribePattern"</code>.
 */
public final String topicName() {
return topicName;
}
/**
 *
 * The specific <code>TopicPartitions</code> to consume. You must specify at least one of <code>"topicName"</code>,
 * <code>"assign"</code> or <code>"subscribePattern"</code>.
 *
 *
 * @return The specific <code>TopicPartitions</code> to consume. You must specify at least one of
 * <code>"topicName"</code>, <code>"assign"</code> or <code>"subscribePattern"</code>.
 */
public final String assign() {
return assign;
}
/**
 *
 * A Java regex string that identifies the topic list to subscribe to. You must specify at least one of
 * <code>"topicName"</code>, <code>"assign"</code> or <code>"subscribePattern"</code>.
 *
 *
 * @return A Java regex string that identifies the topic list to subscribe to. You must specify at least one of
 * <code>"topicName"</code>, <code>"assign"</code> or <code>"subscribePattern"</code>.
 */
public final String subscribePattern() {
return subscribePattern;
}
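// Illustrative sketch, not generator output: populating one of the three subscription styles
// documented on topicName(), assign() and subscribePattern() above. The topic name is a
// hypothetical placeholder, and the JSON shape shown for "assign" follows the Spark Structured
// Streaming Kafka source convention that Glue streaming builds on (an assumption, not a
// documented Glue contract).
private static KafkaStreamingSourceOptions subscriptionSketch() {
return builder()
.topicName("clickstream") // plain topic name; alternatively use one of:
// .assign("{\"clickstream\":[0,1,2]}") // explicit TopicPartitions as JSON
// .subscribePattern("clickstream-.*") // Java regex over topic names
.build();
}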
/**
*
* An optional classification.
*
*
* @return An optional classification.
*/
public final String classification() {
return classification;
}
/**
*
* Specifies the delimiter character.
*
*
* @return Specifies the delimiter character.
*/
public final String delimiter() {
return delimiter;
}
/**
 *
 * The starting position in the Kafka topic to read data from. The possible values are <code>"earliest"</code> or
 * <code>"latest"</code>. The default value is <code>"latest"</code>.
 *
 *
 * @return The starting position in the Kafka topic to read data from. The possible values are
 * <code>"earliest"</code> or <code>"latest"</code>. The default value is <code>"latest"</code>.
 */
public final String startingOffsets() {
return startingOffsets;
}
/**
 *
 * The end point when a batch query is ended. Possible values are either <code>"latest"</code> or a JSON string
 * that specifies an ending offset for each <code>TopicPartition</code>.
 *
 *
 * @return The end point when a batch query is ended. Possible values are either <code>"latest"</code> or a JSON
 * string that specifies an ending offset for each <code>TopicPartition</code>.
 */
public final String endingOffsets() {
return endingOffsets;
}
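// Illustrative comment only: when endingOffsets (or startingOffsets) is given as JSON, the
// per-partition shape below follows the Spark Structured Streaming Kafka source convention
// (an assumption here, not a documented Glue contract), where -2 means "earliest" and -1
// means "latest" for a partition:
//
// startingOffsets: {"clickstream":{"0":23,"1":-2}}
// endingOffsets: {"clickstream":{"0":50,"1":-1}}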
/**
 *
 * The timeout in milliseconds to poll data from Kafka in Spark job executors. The default value is
 * <code>512</code>.
 *
 *
 * @return The timeout in milliseconds to poll data from Kafka in Spark job executors. The default value is
 * <code>512</code>.
 */
public final Long pollTimeoutMs() {
return pollTimeoutMs;
}
/**
 *
 * The number of times to retry before failing to fetch Kafka offsets. The default value is <code>3</code>.
 *
 *
 * @return The number of times to retry before failing to fetch Kafka offsets. The default value is <code>3</code>.
 */
public final Integer numRetries() {
return numRetries;
}
/**
 *
 * The time in milliseconds to wait before retrying to fetch Kafka offsets. The default value is <code>10</code>.
 *
 *
 * @return The time in milliseconds to wait before retrying to fetch Kafka offsets. The default value is
 * <code>10</code>.
 */
public final Long retryIntervalMs() {
return retryIntervalMs;
}
/**
 *
 * The rate limit on the maximum number of offsets that are processed per trigger interval. The specified total
 * number of offsets is proportionally split across <code>topicPartitions</code> of different volumes. The default
 * value is null, which means that the consumer reads all offsets until the known latest offset.
 *
 *
 * @return The rate limit on the maximum number of offsets that are processed per trigger interval. The specified
 * total number of offsets is proportionally split across <code>topicPartitions</code> of different volumes.
 * The default value is null, which means that the consumer reads all offsets until the known latest offset.
 */
public final Long maxOffsetsPerTrigger() {
return maxOffsetsPerTrigger;
}
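// Worked example (illustrative, assuming a straight proportional split): with
// maxOffsetsPerTrigger = 1000 and three partitions holding 1200, 600 and 200 unread records
// (2000 in total), one trigger would read about 1000 * 1200/2000 = 600, 1000 * 600/2000 = 300
// and 1000 * 200/2000 = 100 offsets from the respective partitions.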
/**
*
* The desired minimum number of partitions to read from Kafka. The default value is null, which means that the
* number of spark partitions is equal to the number of Kafka partitions.
*
*
* @return The desired minimum number of partitions to read from Kafka. The default value is null, which means that
* the number of spark partitions is equal to the number of Kafka partitions.
*/
public final Integer minPartitions() {
return minPartitions;
}
/**
 *
 * Whether to include the Kafka headers. When the option is set to "true", the data output will contain an
 * additional column named "glue_streaming_kafka_headers" with type
 * <code>Array[Struct(key: String, value: String)]</code>. The default value is "false". This option is available
 * in Glue version 3.0 or later only.
 *
 *
 * @return Whether to include the Kafka headers. When the option is set to "true", the data output will contain an
 * additional column named "glue_streaming_kafka_headers" with type
 * <code>Array[Struct(key: String, value: String)]</code>. The default value is "false". This option is
 * available in Glue version 3.0 or later only.
 */
public final Boolean includeHeaders() {
return includeHeaders;
}
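// Illustrative comment only: with includeHeaders set to true, each output row gains a column
// named "glue_streaming_kafka_headers" whose Spark SQL type is
// array<struct<key:string,value:string>>, i.e. one (key, value) string pair per Kafka record
// header.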
/**
 *
 * When this option is set to 'true', the data output will contain an additional column named "__src_timestamp"
 * that indicates the time when the corresponding record was received by the topic. The default value is 'false'.
 * This option is supported in Glue version 4.0 or later.
 *
 *
 * @return When this option is set to 'true', the data output will contain an additional column named
 * "__src_timestamp" that indicates the time when the corresponding record was received by the topic. The
 * default value is 'false'. This option is supported in Glue version 4.0 or later.
 */
public final String addRecordTimestamp() {
return addRecordTimestamp;
}
/**
 *
 * When this option is set to 'true', for each batch, it emits to CloudWatch a metric for the duration between
 * the oldest record received by the topic and the time it arrives in Glue. The metric's name is
 * "glue.driver.streaming.maxConsumerLagInMs". The default value is 'false'. This option is supported in Glue
 * version 4.0 or later.
 *
 *
 * @return When this option is set to 'true', for each batch, it emits to CloudWatch a metric for the duration
 * between the oldest record received by the topic and the time it arrives in Glue. The metric's name is
 * "glue.driver.streaming.maxConsumerLagInMs". The default value is 'false'. This option is supported in
 * Glue version 4.0 or later.
 */
public final String emitConsumerLagMetrics() {
return emitConsumerLagMetrics;
}
/**
 *
 * The timestamp of the record in the Kafka topic to start reading data from. The possible values are a timestamp
 * string in UTC format of the pattern <code>yyyy-mm-ddTHH:MM:SSZ</code> (where Z represents a UTC timezone offset
 * with a +/-. For example: "2023-04-04T08:00:00+08:00").
 *
 *
 * Only one of <code>StartingTimestamp</code> or <code>StartingOffsets</code> must be set.
 *
 *
 * @return The timestamp of the record in the Kafka topic to start reading data from. The possible values are a
 * timestamp string in UTC format of the pattern <code>yyyy-mm-ddTHH:MM:SSZ</code> (where Z represents a UTC
 * timezone offset with a +/-. For example: "2023-04-04T08:00:00+08:00").
 *
 * Only one of <code>StartingTimestamp</code> or <code>StartingOffsets</code> must be set.
 */
public final Instant startingTimestamp() {
return startingTimestamp;
}
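// Illustrative sketch, not generator output: building a starting timestamp from the documented
// pattern. OffsetDateTime is fully qualified here so the generated import list stays untouched;
// the literal is the example value from the field documentation above.
private static Instant exampleStartingTimestamp() {
return java.time.OffsetDateTime.parse("2023-04-04T08:00:00+08:00").toInstant();
}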
@Override
public Builder toBuilder() {
return new BuilderImpl(this);
}
public static Builder builder() {
return new BuilderImpl();
}
public static Class<? extends Builder> serializableBuilderClass() {
return BuilderImpl.class;
}
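// Illustrative sketch, not generator output: minimal construction of this model through the
// builder. The connection and topic names are hypothetical placeholders; the numeric values
// restate the defaults documented on the corresponding accessors.
private static KafkaStreamingSourceOptions exampleOptions() {
return builder()
.connectionName("my-kafka-connection") // hypothetical Glue connection name
.topicName("clickstream") // hypothetical topic
.startingOffsets("earliest") // overrides the documented default of "latest"
.pollTimeoutMs(512L) // the documented default, stated explicitly
.numRetries(3) // the documented default, stated explicitly
.build();
}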
@Override
public final int hashCode() {
int hashCode = 1;
hashCode = 31 * hashCode + Objects.hashCode(bootstrapServers());
hashCode = 31 * hashCode + Objects.hashCode(securityProtocol());
hashCode = 31 * hashCode + Objects.hashCode(connectionName());
hashCode = 31 * hashCode + Objects.hashCode(topicName());
hashCode = 31 * hashCode + Objects.hashCode(assign());
hashCode = 31 * hashCode + Objects.hashCode(subscribePattern());
hashCode = 31 * hashCode + Objects.hashCode(classification());
hashCode = 31 * hashCode + Objects.hashCode(delimiter());
hashCode = 31 * hashCode + Objects.hashCode(startingOffsets());
hashCode = 31 * hashCode + Objects.hashCode(endingOffsets());
hashCode = 31 * hashCode + Objects.hashCode(pollTimeoutMs());
hashCode = 31 * hashCode + Objects.hashCode(numRetries());
hashCode = 31 * hashCode + Objects.hashCode(retryIntervalMs());
hashCode = 31 * hashCode + Objects.hashCode(maxOffsetsPerTrigger());
hashCode = 31 * hashCode + Objects.hashCode(minPartitions());
hashCode = 31 * hashCode + Objects.hashCode(includeHeaders());
hashCode = 31 * hashCode + Objects.hashCode(addRecordTimestamp());
hashCode = 31 * hashCode + Objects.hashCode(emitConsumerLagMetrics());
hashCode = 31 * hashCode + Objects.hashCode(startingTimestamp());
return hashCode;
}
@Override
public final boolean equals(Object obj) {
return equalsBySdkFields(obj);
}
@Override
public final boolean equalsBySdkFields(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (!(obj instanceof KafkaStreamingSourceOptions)) {
return false;
}
KafkaStreamingSourceOptions other = (KafkaStreamingSourceOptions) obj;
return Objects.equals(bootstrapServers(), other.bootstrapServers())
&& Objects.equals(securityProtocol(), other.securityProtocol())
&& Objects.equals(connectionName(), other.connectionName()) && Objects.equals(topicName(), other.topicName())
&& Objects.equals(assign(), other.assign()) && Objects.equals(subscribePattern(), other.subscribePattern())
&& Objects.equals(classification(), other.classification()) && Objects.equals(delimiter(), other.delimiter())
&& Objects.equals(startingOffsets(), other.startingOffsets())
&& Objects.equals(endingOffsets(), other.endingOffsets())
&& Objects.equals(pollTimeoutMs(), other.pollTimeoutMs()) && Objects.equals(numRetries(), other.numRetries())
&& Objects.equals(retryIntervalMs(), other.retryIntervalMs())
&& Objects.equals(maxOffsetsPerTrigger(), other.maxOffsetsPerTrigger())
&& Objects.equals(minPartitions(), other.minPartitions())
&& Objects.equals(includeHeaders(), other.includeHeaders())
&& Objects.equals(addRecordTimestamp(), other.addRecordTimestamp())
&& Objects.equals(emitConsumerLagMetrics(), other.emitConsumerLagMetrics())
&& Objects.equals(startingTimestamp(), other.startingTimestamp());
}
/**
* Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be
* redacted from this string using a placeholder value.
*/
@Override
public final String toString() {
return ToString.builder("KafkaStreamingSourceOptions").add("BootstrapServers", bootstrapServers())
.add("SecurityProtocol", securityProtocol()).add("ConnectionName", connectionName())
.add("TopicName", topicName()).add("Assign", assign()).add("SubscribePattern", subscribePattern())
.add("Classification", classification()).add("Delimiter", delimiter()).add("StartingOffsets", startingOffsets())
.add("EndingOffsets", endingOffsets()).add("PollTimeoutMs", pollTimeoutMs()).add("NumRetries", numRetries())
.add("RetryIntervalMs", retryIntervalMs()).add("MaxOffsetsPerTrigger", maxOffsetsPerTrigger())
.add("MinPartitions", minPartitions()).add("IncludeHeaders", includeHeaders())
.add("AddRecordTimestamp", addRecordTimestamp()).add("EmitConsumerLagMetrics", emitConsumerLagMetrics())
.add("StartingTimestamp", startingTimestamp()).build();
}
public final <T> Optional<T> getValueForField(String fieldName, Class<T> clazz) {
switch (fieldName) {
case "BootstrapServers":
return Optional.ofNullable(clazz.cast(bootstrapServers()));
case "SecurityProtocol":
return Optional.ofNullable(clazz.cast(securityProtocol()));
case "ConnectionName":
return Optional.ofNullable(clazz.cast(connectionName()));
case "TopicName":
return Optional.ofNullable(clazz.cast(topicName()));
case "Assign":
return Optional.ofNullable(clazz.cast(assign()));
case "SubscribePattern":
return Optional.ofNullable(clazz.cast(subscribePattern()));
case "Classification":
return Optional.ofNullable(clazz.cast(classification()));
case "Delimiter":
return Optional.ofNullable(clazz.cast(delimiter()));
case "StartingOffsets":
return Optional.ofNullable(clazz.cast(startingOffsets()));
case "EndingOffsets":
return Optional.ofNullable(clazz.cast(endingOffsets()));
case "PollTimeoutMs":
return Optional.ofNullable(clazz.cast(pollTimeoutMs()));
case "NumRetries":
return Optional.ofNullable(clazz.cast(numRetries()));
case "RetryIntervalMs":
return Optional.ofNullable(clazz.cast(retryIntervalMs()));
case "MaxOffsetsPerTrigger":
return Optional.ofNullable(clazz.cast(maxOffsetsPerTrigger()));
case "MinPartitions":
return Optional.ofNullable(clazz.cast(minPartitions()));
case "IncludeHeaders":
return Optional.ofNullable(clazz.cast(includeHeaders()));
case "AddRecordTimestamp":
return Optional.ofNullable(clazz.cast(addRecordTimestamp()));
case "EmitConsumerLagMetrics":
return Optional.ofNullable(clazz.cast(emitConsumerLagMetrics()));
case "StartingTimestamp":
return Optional.ofNullable(clazz.cast(startingTimestamp()));
default:
return Optional.empty();
}
}
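// Illustrative comment only: getValueForField gives reflective, name-based access to a single
// member, e.g. options.getValueForField("BootstrapServers", String.class) returns an
// Optional<String> with the configured bootstrap servers (the variable name is hypothetical).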
@Override
public final List<SdkField<?>> sdkFields() {
return SDK_FIELDS;
}
@Override
public final Map<String, SdkField<?>> sdkFieldNameToField() {
return SDK_NAME_TO_FIELD;
}
private static Function
*
* Only one of StartingTimestamp
or StartingOffsets
must be set.
* @return Returns a reference to this object so that method calls can be chained together.
*/
Builder startingTimestamp(Instant startingTimestamp);
}
static final class BuilderImpl implements Builder {
private String bootstrapServers;
private String securityProtocol;
private String connectionName;
private String topicName;
private String assign;
private String subscribePattern;
private String classification;
private String delimiter;
private String startingOffsets;
private String endingOffsets;
private Long pollTimeoutMs;
private Integer numRetries;
private Long retryIntervalMs;
private Long maxOffsetsPerTrigger;
private Integer minPartitions;
private Boolean includeHeaders;
private String addRecordTimestamp;
private String emitConsumerLagMetrics;
private Instant startingTimestamp;
private BuilderImpl() {
}
private BuilderImpl(KafkaStreamingSourceOptions model) {
bootstrapServers(model.bootstrapServers);
securityProtocol(model.securityProtocol);
connectionName(model.connectionName);
topicName(model.topicName);
assign(model.assign);
subscribePattern(model.subscribePattern);
classification(model.classification);
delimiter(model.delimiter);
startingOffsets(model.startingOffsets);
endingOffsets(model.endingOffsets);
pollTimeoutMs(model.pollTimeoutMs);
numRetries(model.numRetries);
retryIntervalMs(model.retryIntervalMs);
maxOffsetsPerTrigger(model.maxOffsetsPerTrigger);
minPartitions(model.minPartitions);
includeHeaders(model.includeHeaders);
addRecordTimestamp(model.addRecordTimestamp);
emitConsumerLagMetrics(model.emitConsumerLagMetrics);
startingTimestamp(model.startingTimestamp);
}
public final String getBootstrapServers() {
return bootstrapServers;
}
public final void setBootstrapServers(String bootstrapServers) {
this.bootstrapServers = bootstrapServers;
}
@Override
public final Builder bootstrapServers(String bootstrapServers) {
this.bootstrapServers = bootstrapServers;
return this;
}
public final String getSecurityProtocol() {
return securityProtocol;
}
public final void setSecurityProtocol(String securityProtocol) {
this.securityProtocol = securityProtocol;
}
@Override
public final Builder securityProtocol(String securityProtocol) {
this.securityProtocol = securityProtocol;
return this;
}
public final String getConnectionName() {
return connectionName;
}
public final void setConnectionName(String connectionName) {
this.connectionName = connectionName;
}
@Override
public final Builder connectionName(String connectionName) {
this.connectionName = connectionName;
return this;
}
public final String getTopicName() {
return topicName;
}
public final void setTopicName(String topicName) {
this.topicName = topicName;
}
@Override
public final Builder topicName(String topicName) {
this.topicName = topicName;
return this;
}
public final String getAssign() {
return assign;
}
public final void setAssign(String assign) {
this.assign = assign;
}
@Override
public final Builder assign(String assign) {
this.assign = assign;
return this;
}
public final String getSubscribePattern() {
return subscribePattern;
}
public final void setSubscribePattern(String subscribePattern) {
this.subscribePattern = subscribePattern;
}
@Override
public final Builder subscribePattern(String subscribePattern) {
this.subscribePattern = subscribePattern;
return this;
}
public final String getClassification() {
return classification;
}
public final void setClassification(String classification) {
this.classification = classification;
}
@Override
public final Builder classification(String classification) {
this.classification = classification;
return this;
}
public final String getDelimiter() {
return delimiter;
}
public final void setDelimiter(String delimiter) {
this.delimiter = delimiter;
}
@Override
public final Builder delimiter(String delimiter) {
this.delimiter = delimiter;
return this;
}
public final String getStartingOffsets() {
return startingOffsets;
}
public final void setStartingOffsets(String startingOffsets) {
this.startingOffsets = startingOffsets;
}
@Override
public final Builder startingOffsets(String startingOffsets) {
this.startingOffsets = startingOffsets;
return this;
}
public final String getEndingOffsets() {
return endingOffsets;
}
public final void setEndingOffsets(String endingOffsets) {
this.endingOffsets = endingOffsets;
}
@Override
public final Builder endingOffsets(String endingOffsets) {
this.endingOffsets = endingOffsets;
return this;
}
public final Long getPollTimeoutMs() {
return pollTimeoutMs;
}
public final void setPollTimeoutMs(Long pollTimeoutMs) {
this.pollTimeoutMs = pollTimeoutMs;
}
@Override
public final Builder pollTimeoutMs(Long pollTimeoutMs) {
this.pollTimeoutMs = pollTimeoutMs;
return this;
}
public final Integer getNumRetries() {
return numRetries;
}
public final void setNumRetries(Integer numRetries) {
this.numRetries = numRetries;
}
@Override
public final Builder numRetries(Integer numRetries) {
this.numRetries = numRetries;
return this;
}
public final Long getRetryIntervalMs() {
return retryIntervalMs;
}
public final void setRetryIntervalMs(Long retryIntervalMs) {
this.retryIntervalMs = retryIntervalMs;
}
@Override
public final Builder retryIntervalMs(Long retryIntervalMs) {
this.retryIntervalMs = retryIntervalMs;
return this;
}
public final Long getMaxOffsetsPerTrigger() {
return maxOffsetsPerTrigger;
}
public final void setMaxOffsetsPerTrigger(Long maxOffsetsPerTrigger) {
this.maxOffsetsPerTrigger = maxOffsetsPerTrigger;
}
@Override
public final Builder maxOffsetsPerTrigger(Long maxOffsetsPerTrigger) {
this.maxOffsetsPerTrigger = maxOffsetsPerTrigger;
return this;
}
public final Integer getMinPartitions() {
return minPartitions;
}
public final void setMinPartitions(Integer minPartitions) {
this.minPartitions = minPartitions;
}
@Override
public final Builder minPartitions(Integer minPartitions) {
this.minPartitions = minPartitions;
return this;
}
public final Boolean getIncludeHeaders() {
return includeHeaders;
}
public final void setIncludeHeaders(Boolean includeHeaders) {
this.includeHeaders = includeHeaders;
}
@Override
public final Builder includeHeaders(Boolean includeHeaders) {
this.includeHeaders = includeHeaders;
return this;
}
public final String getAddRecordTimestamp() {
return addRecordTimestamp;
}
public final void setAddRecordTimestamp(String addRecordTimestamp) {
this.addRecordTimestamp = addRecordTimestamp;
}
@Override
public final Builder addRecordTimestamp(String addRecordTimestamp) {
this.addRecordTimestamp = addRecordTimestamp;
return this;
}
public final String getEmitConsumerLagMetrics() {
return emitConsumerLagMetrics;
}
public final void setEmitConsumerLagMetrics(String emitConsumerLagMetrics) {
this.emitConsumerLagMetrics = emitConsumerLagMetrics;
}
@Override
public final Builder emitConsumerLagMetrics(String emitConsumerLagMetrics) {
this.emitConsumerLagMetrics = emitConsumerLagMetrics;
return this;
}
public final Instant getStartingTimestamp() {
return startingTimestamp;
}
public final void setStartingTimestamp(Instant startingTimestamp) {
this.startingTimestamp = startingTimestamp;
}
@Override
public final Builder startingTimestamp(Instant startingTimestamp) {
this.startingTimestamp = startingTimestamp;
return this;
}
@Override
public KafkaStreamingSourceOptions build() {
return new KafkaStreamingSourceOptions(this);
}
@Override
public List<SdkField<?>> sdkFields() {
return SDK_FIELDS;
}
@Override
public Map<String, SdkField<?>> sdkFieldNameToField() {
return SDK_NAME_TO_FIELD;
}
}
}