com.pulumi.digitalocean.kotlin.outputs.DatabaseKafkaTopicConfig.kt Maven / Gradle / Ivy
Go to download
Show more of this group Show more artifacts with this name
Show all versions of pulumi-digitalocean-kotlin Show documentation
Build cloud applications and infrastructure by combining the safety and reliability of infrastructure as code with the power of the Kotlin programming language.
@file:Suppress("NAME_SHADOWING", "DEPRECATION")
package com.pulumi.digitalocean.kotlin.outputs
import kotlin.Boolean
import kotlin.Double
import kotlin.Int
import kotlin.String
import kotlin.Suppress
/**
*
* @property cleanupPolicy The topic cleanup policy that describes whether messages should be deleted, compacted, or both when retention policies are violated.
* This may be one of "delete", "compact", or "compact_delete".
* @property compressionType The topic compression codecs used for a given topic.
* This may be one of "uncompressed", "gzip", "snappy", "lz4", "producer", "zstd". "uncompressed" indicates that there is no compression and "producer" retains the original compression codec set by the producer.
* @property deleteRetentionMs The amount of time, in ms, that deleted records are retained.
* @property fileDeleteDelayMs The amount of time, in ms, to wait before deleting a topic log segment from the filesystem.
* @property flushMessages The number of messages accumulated on a topic partition before they are flushed to disk.
* @property flushMs The maximum time, in ms, that a topic is kept in memory before being flushed to disk.
* @property indexIntervalBytes The interval, in bytes, in which entries are added to the offset index.
* @property maxCompactionLagMs The maximum time, in ms, that a particular message will remain uncompacted. This will not apply if the `compression_type` is set to "uncompressed" or it is set to `producer` and the producer is not using compression.
* @property maxMessageBytes The maximum size, in bytes, of a message.
* @property messageDownConversionEnable Determines whether down-conversion of message formats for consumers is enabled.
* @property messageFormatVersion The version of the inter-broker protocol that will be used. This may be one of "0.8.0", "0.8.1", "0.8.2", "0.9.0", "0.10.0", "0.10.0-IV0", "0.10.0-IV1", "0.10.1", "0.10.1-IV0", "0.10.1-IV1", "0.10.1-IV2", "0.10.2", "0.10.2-IV0", "0.11.0", "0.11.0-IV0", "0.11.0-IV1", "0.11.0-IV2", "1.0", "1.0-IV0", "1.1", "1.1-IV0", "2.0", "2.0-IV0", "2.0-IV1", "2.1", "2.1-IV0", "2.1-IV1", "2.1-IV2", "2.2", "2.2-IV0", "2.2-IV1", "2.3", "2.3-IV0", "2.3-IV1", "2.4", "2.4-IV0", "2.4-IV1", "2.5", "2.5-IV0", "2.6", "2.6-IV0", "2.7", "2.7-IV0", "2.7-IV1", "2.7-IV2", "2.8", "2.8-IV0", "2.8-IV1", "3.0", "3.0-IV0", "3.0-IV1", "3.1", "3.1-IV0", "3.2", "3.2-IV0", "3.3", "3.3-IV0", "3.3-IV1", "3.3-IV2", "3.3-IV3", "3.4", "3.4-IV0", "3.5", "3.5-IV0", "3.5-IV1", "3.5-IV2", "3.6", "3.6-IV0", "3.6-IV1", "3.6-IV2".
* @property messageTimestampDifferenceMaxMs The maximum difference, in ms, between the timestamp specific in a message and when the broker receives the message.
* @property messageTimestampType Specifies which timestamp to use for the message. This may be one of "create_time" or "log_append_time".
* @property minCleanableDirtyRatio A scale between 0.0 and 1.0 which controls the frequency of the compactor. Larger values mean more frequent compactions. This is often paired with `max_compaction_lag_ms` to control the compactor frequency.
* @property minCompactionLagMs
* @property minInsyncReplicas The number of replicas that must acknowledge a write before it is considered successful. -1 is a special setting to indicate that all nodes must ack a message before a write is considered successful. Default is 1, indicating at least 1 replica must acknowledge a write to be considered successful.
* @property preallocate Determines whether to preallocate a file on disk when creating a new log segment within a topic.
* @property retentionBytes The maximum size, in bytes, of a topic before messages are deleted. -1 is a special setting indicating that this setting has no limit.
* @property retentionMs The maximum time, in ms, that a topic log file is retained before deleting it. -1 is a special setting indicating that this setting has no limit.
* @property segmentBytes The maximum size, in bytes, of a single topic log file.
* @property segmentIndexBytes The maximum size, in bytes, of the offset index.
* @property segmentJitterMs The maximum time, in ms, subtracted from the scheduled segment disk flush time to avoid the thundering herd problem for segment flushing.
* @property segmentMs The maximum time, in ms, before the topic log will flush to disk.
*/
public data class DatabaseKafkaTopicConfig(
    public val cleanupPolicy: String? = null,
    public val compressionType: String? = null,
    public val deleteRetentionMs: String? = null,
    public val fileDeleteDelayMs: String? = null,
    public val flushMessages: String? = null,
    public val flushMs: String? = null,
    public val indexIntervalBytes: String? = null,
    public val maxCompactionLagMs: String? = null,
    public val maxMessageBytes: String? = null,
    public val messageDownConversionEnable: Boolean? = null,
    public val messageFormatVersion: String? = null,
    public val messageTimestampDifferenceMaxMs: String? = null,
    public val messageTimestampType: String? = null,
    public val minCleanableDirtyRatio: Double? = null,
    public val minCompactionLagMs: String? = null,
    public val minInsyncReplicas: Int? = null,
    public val preallocate: Boolean? = null,
    public val retentionBytes: String? = null,
    public val retentionMs: String? = null,
    public val segmentBytes: String? = null,
    public val segmentIndexBytes: String? = null,
    public val segmentMs: String? = null,
    public val segmentJitterMs: String? = null,
) {
    public companion object {
        /**
         * Converts the generated Java SDK output type into this Kotlin representation.
         *
         * Each Java getter returns an `Optional`; an absent value is mapped straight to
         * `null` via `orElse(null)` (the original identity `.map { it }` step was a no-op
         * and has been dropped).
         *
         * @param javaType the Java SDK value to convert
         * @return the equivalent [DatabaseKafkaTopicConfig]
         */
        public fun toKotlin(javaType: com.pulumi.digitalocean.outputs.DatabaseKafkaTopicConfig): DatabaseKafkaTopicConfig =
            DatabaseKafkaTopicConfig(
                cleanupPolicy = javaType.cleanupPolicy().orElse(null),
                compressionType = javaType.compressionType().orElse(null),
                deleteRetentionMs = javaType.deleteRetentionMs().orElse(null),
                fileDeleteDelayMs = javaType.fileDeleteDelayMs().orElse(null),
                flushMessages = javaType.flushMessages().orElse(null),
                flushMs = javaType.flushMs().orElse(null),
                indexIntervalBytes = javaType.indexIntervalBytes().orElse(null),
                maxCompactionLagMs = javaType.maxCompactionLagMs().orElse(null),
                maxMessageBytes = javaType.maxMessageBytes().orElse(null),
                messageDownConversionEnable = javaType.messageDownConversionEnable().orElse(null),
                messageFormatVersion = javaType.messageFormatVersion().orElse(null),
                messageTimestampDifferenceMaxMs = javaType.messageTimestampDifferenceMaxMs().orElse(null),
                messageTimestampType = javaType.messageTimestampType().orElse(null),
                minCleanableDirtyRatio = javaType.minCleanableDirtyRatio().orElse(null),
                minCompactionLagMs = javaType.minCompactionLagMs().orElse(null),
                minInsyncReplicas = javaType.minInsyncReplicas().orElse(null),
                preallocate = javaType.preallocate().orElse(null),
                retentionBytes = javaType.retentionBytes().orElse(null),
                retentionMs = javaType.retentionMs().orElse(null),
                segmentBytes = javaType.segmentBytes().orElse(null),
                segmentIndexBytes = javaType.segmentIndexBytes().orElse(null),
                segmentJitterMs = javaType.segmentJitterMs().orElse(null),
                segmentMs = javaType.segmentMs().orElse(null),
            )
    }
}