
@file:Suppress("NAME_SHADOWING", "DEPRECATION")
package com.pulumi.awsnative.kinesisfirehose.kotlin.inputs
import com.pulumi.awsnative.kinesisfirehose.inputs.DeliveryStreamParquetSerDeArgs.builder
import com.pulumi.core.Output
import com.pulumi.core.Output.of
import com.pulumi.kotlin.ConvertibleToJava
import com.pulumi.kotlin.PulumiTagMarker
import kotlin.Boolean
import kotlin.Int
import kotlin.String
import kotlin.Suppress
import kotlin.jvm.JvmName
/**
*
 * @property blockSizeBytes The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Firehose uses this value for padding calculations.
 * @property compression The compression code to use over data blocks. The possible values are `UNCOMPRESSED`, `SNAPPY`, and `GZIP`, with the default being `SNAPPY`. Use `SNAPPY` for higher decompression speed. Use `GZIP` if the compression ratio is more important than speed.
 * @property enableDictionaryCompression Indicates whether to enable dictionary compression.
 * @property maxPaddingBytes The maximum amount of padding to apply. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 0.
 * @property pageSizeBytes The Parquet page size. Column chunks are divided into pages. A page is conceptually an indivisible unit (in terms of compression and encoding). The minimum value is 64 KiB and the default is 1 MiB.
 * @property writerVersion Indicates the version of row format to output. The possible values are `V1` and `V2`. The default is `V1`.
*/
public data class DeliveryStreamParquetSerDeArgs(
    public val blockSizeBytes: Output<Int>? = null,
    public val compression: Output<String>? = null,
    public val enableDictionaryCompression: Output<Boolean>? = null,
    public val maxPaddingBytes: Output<Int>? = null,
    public val pageSizeBytes: Output<Int>? = null,
    public val writerVersion: Output<String>? = null,
) : ConvertibleToJava<com.pulumi.awsnative.kinesisfirehose.inputs.DeliveryStreamParquetSerDeArgs> {
    override fun toJava(): com.pulumi.awsnative.kinesisfirehose.inputs.DeliveryStreamParquetSerDeArgs =
        com.pulumi.awsnative.kinesisfirehose.inputs.DeliveryStreamParquetSerDeArgs.builder()
            .blockSizeBytes(blockSizeBytes?.applyValue({ args0 -> args0 }))
            .compression(compression?.applyValue({ args0 -> args0 }))
            .enableDictionaryCompression(enableDictionaryCompression?.applyValue({ args0 -> args0 }))
            .maxPaddingBytes(maxPaddingBytes?.applyValue({ args0 -> args0 }))
            .pageSizeBytes(pageSizeBytes?.applyValue({ args0 -> args0 }))
            .writerVersion(writerVersion?.applyValue({ args0 -> args0 }))
            .build()
}
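
// Usage sketch (illustrative, not part of the generated file): these args can be
// constructed directly, wrapping plain values with `Output.of`. Every field is
// optional; leaving one null lets Firehose apply the documented default. The
// numeric literals below are assumptions chosen to match the documented defaults
// (256 MiB block size, 1 MiB page size).
//
// val parquetSerDe = DeliveryStreamParquetSerDeArgs(
//     blockSizeBytes = Output.of(268435456),          // 256 MiB = 256 * 1024 * 1024
//     compression = Output.of("SNAPPY"),
//     enableDictionaryCompression = Output.of(true),
//     pageSizeBytes = Output.of(1048576),             // 1 MiB = 1024 * 1024
//     writerVersion = Output.of("V1"),
// )
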
/**
* Builder for [DeliveryStreamParquetSerDeArgs].
*/
@PulumiTagMarker
public class DeliveryStreamParquetSerDeArgsBuilder internal constructor() {
    private var blockSizeBytes: Output<Int>? = null

    private var compression: Output<String>? = null

    private var enableDictionaryCompression: Output<Boolean>? = null

    private var maxPaddingBytes: Output<Int>? = null

    private var pageSizeBytes: Output<Int>? = null

    private var writerVersion: Output<String>? = null

    /**
     * @param value The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Firehose uses this value for padding calculations.
     */
    @JvmName("nacqnohxvdoyvuhl")
    public suspend fun blockSizeBytes(`value`: Output<Int>) {
        this.blockSizeBytes = value
    }

    /**
     * @param value The compression code to use over data blocks. The possible values are `UNCOMPRESSED`, `SNAPPY`, and `GZIP`, with the default being `SNAPPY`. Use `SNAPPY` for higher decompression speed. Use `GZIP` if the compression ratio is more important than speed.
     */
    @JvmName("edvmkfxepardqngi")
    public suspend fun compression(`value`: Output<String>) {
        this.compression = value
    }

    /**
     * @param value Indicates whether to enable dictionary compression.
     */
    @JvmName("ykbkntbidnhxswvd")
    public suspend fun enableDictionaryCompression(`value`: Output<Boolean>) {
        this.enableDictionaryCompression = value
    }

    /**
     * @param value The maximum amount of padding to apply. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 0.
     */
    @JvmName("lalrkecmvvlgcgmf")
    public suspend fun maxPaddingBytes(`value`: Output<Int>) {
        this.maxPaddingBytes = value
    }

    /**
     * @param value The Parquet page size. Column chunks are divided into pages. A page is conceptually an indivisible unit (in terms of compression and encoding). The minimum value is 64 KiB and the default is 1 MiB.
     */
    @JvmName("ocqgaopbriwqjcvt")
    public suspend fun pageSizeBytes(`value`: Output<Int>) {
        this.pageSizeBytes = value
    }

    /**
     * @param value Indicates the version of row format to output. The possible values are `V1` and `V2`. The default is `V1`.
     */
    @JvmName("xxghnkxhinyfhauj")
    public suspend fun writerVersion(`value`: Output<String>) {
        this.writerVersion = value
    }
    /**
     * @param value The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Firehose uses this value for padding calculations.
     */
    @JvmName("hhjiykmiurflrwid")
    public suspend fun blockSizeBytes(`value`: Int?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.blockSizeBytes = mapped
    }

    /**
     * @param value The compression code to use over data blocks. The possible values are `UNCOMPRESSED`, `SNAPPY`, and `GZIP`, with the default being `SNAPPY`. Use `SNAPPY` for higher decompression speed. Use `GZIP` if the compression ratio is more important than speed.
     */
    @JvmName("pjqsrhlyiyyagnii")
    public suspend fun compression(`value`: String?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.compression = mapped
    }

    /**
     * @param value Indicates whether to enable dictionary compression.
     */
    @JvmName("ikcbubmdupqpnyqb")
    public suspend fun enableDictionaryCompression(`value`: Boolean?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.enableDictionaryCompression = mapped
    }

    /**
     * @param value The maximum amount of padding to apply. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 0.
     */
    @JvmName("rmwrruoohkhjjawj")
    public suspend fun maxPaddingBytes(`value`: Int?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.maxPaddingBytes = mapped
    }

    /**
     * @param value The Parquet page size. Column chunks are divided into pages. A page is conceptually an indivisible unit (in terms of compression and encoding). The minimum value is 64 KiB and the default is 1 MiB.
     */
    @JvmName("htayuimnysedfihj")
    public suspend fun pageSizeBytes(`value`: Int?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.pageSizeBytes = mapped
    }

    /**
     * @param value Indicates the version of row format to output. The possible values are `V1` and `V2`. The default is `V1`.
     */
    @JvmName("tleljalbrgmuyjnj")
    public suspend fun writerVersion(`value`: String?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.writerVersion = mapped
    }
    internal fun build(): DeliveryStreamParquetSerDeArgs = DeliveryStreamParquetSerDeArgs(
        blockSizeBytes = blockSizeBytes,
        compression = compression,
        enableDictionaryCompression = enableDictionaryCompression,
        maxPaddingBytes = maxPaddingBytes,
        pageSizeBytes = pageSizeBytes,
        writerVersion = writerVersion,
    )
}
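
// DSL sketch (illustrative): this builder's constructor is internal, so user code
// reaches it through the generated type-safe DSL scope functions rather than by
// instantiating it directly. The nesting below follows the AWS Native
// DeliveryStream schema (extendedS3DestinationConfiguration ->
// dataFormatConversionConfiguration -> outputFormatConfiguration -> serializer ->
// parquetSerDe); the surrounding resource function and intermediate block names
// are assumptions drawn from that schema, not defined in this file.
//
// import com.pulumi.awsnative.kinesisfirehose.kotlin.deliveryStream
//
// val stream = deliveryStream("parquet-stream") {
//     args {
//         extendedS3DestinationConfiguration {
//             dataFormatConversionConfiguration {
//                 outputFormatConfiguration {
//                     serializer {
//                         parquetSerDe {
//                             compression("SNAPPY")     // default; highest decompression speed
//                             blockSizeBytes(67108864)  // 64 MiB, the documented minimum
//                             writerVersion("V2")
//                         }
//                     }
//                 }
//             }
//         }
//     }
// }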