commonMain.aws.sdk.kotlin.services.glue.model.KafkaStreamingSourceOptions.kt

// Code generated by smithy-kotlin-codegen. DO NOT EDIT!

package aws.sdk.kotlin.services.glue.model

import aws.smithy.kotlin.runtime.SdkDsl
import aws.smithy.kotlin.runtime.time.Instant

/**
 * Additional options for streaming.
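 *
 * A minimal construction sketch using the builder DSL; the broker address and topic
 * name below are hypothetical placeholders:
 *
 * ```
 * val options = KafkaStreamingSourceOptions {
 *     bootstrapServers = "b-1.example.kafka.us-east-1.amazonaws.com:9094" // hypothetical broker
 *     topicName = "example-topic" // hypothetical topic
 *     startingOffsets = "earliest"
 * }
 * ```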
 */
public class KafkaStreamingSourceOptions private constructor(builder: Builder) {
    /**
     * When this option is set to 'true', the data output will contain an additional column named "__src_timestamp" that indicates the time when the corresponding record was received by the topic. The default value is 'false'. This option is supported in Glue version 4.0 or later.
     */
    public val addRecordTimestamp: kotlin.String? = builder.addRecordTimestamp
    /**
     * The specific `TopicPartitions` to consume. You must specify at least one of `"topicName"`, `"assign"` or `"subscribePattern"`.
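     *
     * For example, a Spark-style JSON partition map (the topic and partition numbers here are hypothetical): `{"topicA": [0, 1]}`.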
     */
    public val assign: kotlin.String? = builder.assign
    /**
     * A list of bootstrap server URLs, for example `b-1.vpc-test-2.o4q88o.c6.kafka.us-east-1.amazonaws.com:9094`. This option must be specified in the API call or defined in the table metadata in the Data Catalog.
     */
    public val bootstrapServers: kotlin.String? = builder.bootstrapServers
    /**
     * An optional classification.
     */
    public val classification: kotlin.String? = builder.classification
    /**
     * The name of the connection.
     */
    public val connectionName: kotlin.String? = builder.connectionName
    /**
     * Specifies the delimiter character.
     */
    public val delimiter: kotlin.String? = builder.delimiter
    /**
     * When this option is set to 'true', for each batch, it will emit to CloudWatch a metric for the duration between the time the topic received the oldest record and the time it arrives in Glue. The metric's name is "glue.driver.streaming.maxConsumerLagInMs". The default value is 'false'. This option is supported in Glue version 4.0 or later.
     */
    public val emitConsumerLagMetrics: kotlin.String? = builder.emitConsumerLagMetrics
    /**
     * The end point at which a batch query ends. Possible values are either `"latest"` or a JSON string that specifies an ending offset for each `TopicPartition`.
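     *
     * For example, a Spark-style per-partition JSON string with hypothetical values, where `-1` denotes the latest offset: `{"topicA": {"0": 23, "1": -1}}`.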
     */
    public val endingOffsets: kotlin.String? = builder.endingOffsets
    /**
     * Whether to include the Kafka headers. When the option is set to "true", the data output will contain an additional column named "glue_streaming_kafka_headers" with type `Array[Struct(key: String, value: String)]`. The default value is "false". This option is available in Glue version 3.0 or later only.
     */
    public val includeHeaders: kotlin.Boolean? = builder.includeHeaders
    /**
     * The rate limit on the maximum number of offsets that are processed per trigger interval. The specified total number of offsets is proportionally split across `topicPartitions` of different volumes. The default value is null, which means that the consumer reads all offsets until the known latest offset.
     */
    public val maxOffsetsPerTrigger: kotlin.Long? = builder.maxOffsetsPerTrigger
    /**
     * The desired minimum number of partitions to read from Kafka. The default value is null, which means that the number of Spark partitions is equal to the number of Kafka partitions.
     */
    public val minPartitions: kotlin.Int? = builder.minPartitions
    /**
     * The number of times to retry before failing to fetch Kafka offsets. The default value is `3`.
     */
    public val numRetries: kotlin.Int? = builder.numRetries
    /**
     * The timeout in milliseconds to poll data from Kafka in Spark job executors. The default value is `512`.
     */
    public val pollTimeoutMs: kotlin.Long? = builder.pollTimeoutMs
    /**
     * The time in milliseconds to wait before retrying to fetch Kafka offsets. The default value is `10`.
     */
    public val retryIntervalMs: kotlin.Long? = builder.retryIntervalMs
    /**
     * The protocol used to communicate with brokers. The possible values are `"SSL"` or `"PLAINTEXT"`.
     */
    public val securityProtocol: kotlin.String? = builder.securityProtocol
    /**
     * The starting position in the Kafka topic to read data from. The possible values are `"earliest"` or `"latest"`. The default value is `"latest"`.
     */
    public val startingOffsets: kotlin.String? = builder.startingOffsets
    /**
     * The timestamp of the record in the Kafka topic to start reading data from. The possible value is a timestamp string in UTC format of the pattern `yyyy-mm-ddTHH:MM:SSZ`, where Z represents a UTC timezone offset with a +/- (for example, "2023-04-04T08:00:00+08:00").
     *
     * Only one of `StartingTimestamp` or `StartingOffsets` must be set.
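     *
     * In this Kotlin model the value is an [Instant]; one way to construct it (with a hypothetical date) is `Instant.fromIso8601("2023-04-04T08:00:00+08:00")`.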
     */
    public val startingTimestamp: aws.smithy.kotlin.runtime.time.Instant? = builder.startingTimestamp
    /**
     * A Java regex string that identifies the topic list to subscribe to. You must specify at least one of `"topicName"`, `"assign"` or `"subscribePattern"`.
     */
    public val subscribePattern: kotlin.String? = builder.subscribePattern
    /**
     * The topic name as specified in Apache Kafka. You must specify at least one of `"topicName"`, `"assign"` or `"subscribePattern"`.
     */
    public val topicName: kotlin.String? = builder.topicName

    public companion object {
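        /**
         * DSL entry point: `KafkaStreamingSourceOptions { ... }` applies [block] to a fresh
         * [Builder] and returns the built instance; see the class-level sample above.
         */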
        public operator fun invoke(block: Builder.() -> kotlin.Unit): aws.sdk.kotlin.services.glue.model.KafkaStreamingSourceOptions = Builder().apply(block).build()
    }

    override fun toString(): kotlin.String = buildString {
        append("KafkaStreamingSourceOptions(")
        append("addRecordTimestamp=$addRecordTimestamp,")
        append("assign=$assign,")
        append("bootstrapServers=$bootstrapServers,")
        append("classification=$classification,")
        append("connectionName=$connectionName,")
        append("delimiter=$delimiter,")
        append("emitConsumerLagMetrics=$emitConsumerLagMetrics,")
        append("endingOffsets=$endingOffsets,")
        append("includeHeaders=$includeHeaders,")
        append("maxOffsetsPerTrigger=$maxOffsetsPerTrigger,")
        append("minPartitions=$minPartitions,")
        append("numRetries=$numRetries,")
        append("pollTimeoutMs=$pollTimeoutMs,")
        append("retryIntervalMs=$retryIntervalMs,")
        append("securityProtocol=$securityProtocol,")
        append("startingOffsets=$startingOffsets,")
        append("startingTimestamp=$startingTimestamp,")
        append("subscribePattern=$subscribePattern,")
        append("topicName=$topicName")
        append(")")
    }

    override fun hashCode(): kotlin.Int {
        var result = addRecordTimestamp?.hashCode() ?: 0
        result = 31 * result + (assign?.hashCode() ?: 0)
        result = 31 * result + (bootstrapServers?.hashCode() ?: 0)
        result = 31 * result + (classification?.hashCode() ?: 0)
        result = 31 * result + (connectionName?.hashCode() ?: 0)
        result = 31 * result + (delimiter?.hashCode() ?: 0)
        result = 31 * result + (emitConsumerLagMetrics?.hashCode() ?: 0)
        result = 31 * result + (endingOffsets?.hashCode() ?: 0)
        result = 31 * result + (includeHeaders?.hashCode() ?: 0)
        result = 31 * result + (maxOffsetsPerTrigger?.hashCode() ?: 0)
        result = 31 * result + (minPartitions ?: 0)
        result = 31 * result + (numRetries ?: 0)
        result = 31 * result + (pollTimeoutMs?.hashCode() ?: 0)
        result = 31 * result + (retryIntervalMs?.hashCode() ?: 0)
        result = 31 * result + (securityProtocol?.hashCode() ?: 0)
        result = 31 * result + (startingOffsets?.hashCode() ?: 0)
        result = 31 * result + (startingTimestamp?.hashCode() ?: 0)
        result = 31 * result + (subscribePattern?.hashCode() ?: 0)
        result = 31 * result + (topicName?.hashCode() ?: 0)
        return result
    }

    override fun equals(other: kotlin.Any?): kotlin.Boolean {
        if (this === other) return true
        if (other == null || this::class != other::class) return false

        other as KafkaStreamingSourceOptions

        if (addRecordTimestamp != other.addRecordTimestamp) return false
        if (assign != other.assign) return false
        if (bootstrapServers != other.bootstrapServers) return false
        if (classification != other.classification) return false
        if (connectionName != other.connectionName) return false
        if (delimiter != other.delimiter) return false
        if (emitConsumerLagMetrics != other.emitConsumerLagMetrics) return false
        if (endingOffsets != other.endingOffsets) return false
        if (includeHeaders != other.includeHeaders) return false
        if (maxOffsetsPerTrigger != other.maxOffsetsPerTrigger) return false
        if (minPartitions != other.minPartitions) return false
        if (numRetries != other.numRetries) return false
        if (pollTimeoutMs != other.pollTimeoutMs) return false
        if (retryIntervalMs != other.retryIntervalMs) return false
        if (securityProtocol != other.securityProtocol) return false
        if (startingOffsets != other.startingOffsets) return false
        if (startingTimestamp != other.startingTimestamp) return false
        if (subscribePattern != other.subscribePattern) return false
        if (topicName != other.topicName) return false

        return true
    }

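    /**
     * Returns a copy of this instance with any overrides applied through the builder DSL,
     * for example `options.copy { startingOffsets = "latest" }`.
     */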
    public inline fun copy(block: Builder.() -> kotlin.Unit = {}): aws.sdk.kotlin.services.glue.model.KafkaStreamingSourceOptions = Builder(this).apply(block).build()

    @SdkDsl
    public class Builder {
        /**
         * When this option is set to 'true', the data output will contain an additional column named "__src_timestamp" that indicates the time when the corresponding record was received by the topic. The default value is 'false'. This option is supported in Glue version 4.0 or later.
         */
        public var addRecordTimestamp: kotlin.String? = null
        /**
         * The specific `TopicPartitions` to consume. You must specify at least one of `"topicName"`, `"assign"` or `"subscribePattern"`.
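         *
         * For example, a Spark-style JSON partition map (the topic and partition numbers here are hypothetical): `{"topicA": [0, 1]}`.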
         */
        public var assign: kotlin.String? = null
        /**
         * A list of bootstrap server URLs, for example `b-1.vpc-test-2.o4q88o.c6.kafka.us-east-1.amazonaws.com:9094`. This option must be specified in the API call or defined in the table metadata in the Data Catalog.
         */
        public var bootstrapServers: kotlin.String? = null
        /**
         * An optional classification.
         */
        public var classification: kotlin.String? = null
        /**
         * The name of the connection.
         */
        public var connectionName: kotlin.String? = null
        /**
         * Specifies the delimiter character.
         */
        public var delimiter: kotlin.String? = null
        /**
         * When this option is set to 'true', for each batch, it will emit to CloudWatch a metric for the duration between the time the topic received the oldest record and the time it arrives in Glue. The metric's name is "glue.driver.streaming.maxConsumerLagInMs". The default value is 'false'. This option is supported in Glue version 4.0 or later.
         */
        public var emitConsumerLagMetrics: kotlin.String? = null
        /**
         * The end point at which a batch query ends. Possible values are either `"latest"` or a JSON string that specifies an ending offset for each `TopicPartition`.
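         *
         * For example, a Spark-style per-partition JSON string with hypothetical values, where `-1` denotes the latest offset: `{"topicA": {"0": 23, "1": -1}}`.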
         */
        public var endingOffsets: kotlin.String? = null
        /**
         * Whether to include the Kafka headers. When the option is set to "true", the data output will contain an additional column named "glue_streaming_kafka_headers" with type `Array[Struct(key: String, value: String)]`. The default value is "false". This option is available in Glue version 3.0 or later only.
         */
        public var includeHeaders: kotlin.Boolean? = null
        /**
         * The rate limit on the maximum number of offsets that are processed per trigger interval. The specified total number of offsets is proportionally split across `topicPartitions` of different volumes. The default value is null, which means that the consumer reads all offsets until the known latest offset.
         */
        public var maxOffsetsPerTrigger: kotlin.Long? = null
        /**
         * The desired minimum number of partitions to read from Kafka. The default value is null, which means that the number of Spark partitions is equal to the number of Kafka partitions.
         */
        public var minPartitions: kotlin.Int? = null
        /**
         * The number of times to retry before failing to fetch Kafka offsets. The default value is `3`.
         */
        public var numRetries: kotlin.Int? = null
        /**
         * The timeout in milliseconds to poll data from Kafka in Spark job executors. The default value is `512`.
         */
        public var pollTimeoutMs: kotlin.Long? = null
        /**
         * The time in milliseconds to wait before retrying to fetch Kafka offsets. The default value is `10`.
         */
        public var retryIntervalMs: kotlin.Long? = null
        /**
         * The protocol used to communicate with brokers. The possible values are `"SSL"` or `"PLAINTEXT"`.
         */
        public var securityProtocol: kotlin.String? = null
        /**
         * The starting position in the Kafka topic to read data from. The possible values are `"earliest"` or `"latest"`. The default value is `"latest"`.
         */
        public var startingOffsets: kotlin.String? = null
        /**
         * The timestamp of the record in the Kafka topic to start reading data from. The possible value is a timestamp string in UTC format of the pattern `yyyy-mm-ddTHH:MM:SSZ`, where Z represents a UTC timezone offset with a +/- (for example, "2023-04-04T08:00:00+08:00").
         *
         * Only one of `StartingTimestamp` or `StartingOffsets` must be set.
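         *
         * In this Kotlin model the value is an [Instant]; one way to construct it (with a hypothetical date) is `Instant.fromIso8601("2023-04-04T08:00:00+08:00")`.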
         */
        public var startingTimestamp: aws.smithy.kotlin.runtime.time.Instant? = null
        /**
         * A Java regex string that identifies the topic list to subscribe to. You must specify at least one of `"topicName"`, `"assign"` or `"subscribePattern"`.
         */
        public var subscribePattern: kotlin.String? = null
        /**
         * The topic name as specified in Apache Kafka. You must specify at least one of `"topicName"`, `"assign"` or `"subscribePattern"`.
         */
        public var topicName: kotlin.String? = null

        @PublishedApi
        internal constructor()
        @PublishedApi
        internal constructor(x: aws.sdk.kotlin.services.glue.model.KafkaStreamingSourceOptions) : this() {
            this.addRecordTimestamp = x.addRecordTimestamp
            this.assign = x.assign
            this.bootstrapServers = x.bootstrapServers
            this.classification = x.classification
            this.connectionName = x.connectionName
            this.delimiter = x.delimiter
            this.emitConsumerLagMetrics = x.emitConsumerLagMetrics
            this.endingOffsets = x.endingOffsets
            this.includeHeaders = x.includeHeaders
            this.maxOffsetsPerTrigger = x.maxOffsetsPerTrigger
            this.minPartitions = x.minPartitions
            this.numRetries = x.numRetries
            this.pollTimeoutMs = x.pollTimeoutMs
            this.retryIntervalMs = x.retryIntervalMs
            this.securityProtocol = x.securityProtocol
            this.startingOffsets = x.startingOffsets
            this.startingTimestamp = x.startingTimestamp
            this.subscribePattern = x.subscribePattern
            this.topicName = x.topicName
        }

        @PublishedApi
        internal fun build(): aws.sdk.kotlin.services.glue.model.KafkaStreamingSourceOptions = KafkaStreamingSourceOptions(this)

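        // Codegen hook for filling in missing required members; this shape has none,
        // so the builder is returned unchanged.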
        internal fun correctErrors(): Builder {
            return this
        }
    }
}
