
com.pulumi.awsnative.kinesisfirehose.kotlin.inputs.DeliveryStreamOrcSerDeArgs.kt

@file:Suppress("NAME_SHADOWING", "DEPRECATION")

package com.pulumi.awsnative.kinesisfirehose.kotlin.inputs

import com.pulumi.awsnative.kinesisfirehose.inputs.DeliveryStreamOrcSerDeArgs.builder
import com.pulumi.core.Output
import com.pulumi.core.Output.of
import com.pulumi.kotlin.ConvertibleToJava
import com.pulumi.kotlin.PulumiTagMarker
import kotlin.Boolean
import kotlin.Double
import kotlin.Int
import kotlin.String
import kotlin.Suppress
import kotlin.collections.List
import kotlin.jvm.JvmName

/**
 *
 * @property blockSizeBytes The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Firehose uses this value for padding calculations.
 * @property bloomFilterColumns The column names for which you want Firehose to create bloom filters. The default is `null` .
 * @property bloomFilterFalsePositiveProbability The Bloom filter false positive probability (FPP). The lower the FPP, the bigger the Bloom filter. The default value is 0.05, the minimum is 0, and the maximum is 1.
 * @property compression The compression code to use over data blocks. The default is `SNAPPY` .
 * @property dictionaryKeyThreshold Represents the fraction of the total number of non-null rows. To turn off dictionary encoding, set this fraction to a number that is less than the number of distinct keys in a dictionary. To always use dictionary encoding, set this threshold to 1.
 * @property enablePadding Set this to `true` to indicate that you want stripes to be padded to the HDFS block boundaries. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is `false` .
 * @property formatVersion The version of the file to write. The possible values are `V0_11` and `V0_12` . The default is `V0_12` .
 * @property paddingTolerance A number between 0 and 1 that defines the tolerance for block padding as a decimal fraction of stripe size. The default value is 0.05, which means 5 percent of stripe size.
 * For the default values of 64 MiB ORC stripes and 256 MiB HDFS blocks, the default block padding tolerance of 5 percent reserves a maximum of 3.2 MiB for padding within the 256 MiB block. In such a case, if the available size within the block is more than 3.2 MiB, a new, smaller stripe is inserted to fit within that space. This ensures that no stripe crosses block boundaries and causes remote reads within a node-local task.
 * Kinesis Data Firehose ignores this parameter when `EnablePadding` is `false` .
 * @property rowIndexStride The number of rows between index entries. The default is 10,000 and the minimum is 1,000.
 * @property stripeSizeBytes The number of bytes in each stripe. The default is 64 MiB and the minimum is 8 MiB.
 */
public data class DeliveryStreamOrcSerDeArgs(
    public val blockSizeBytes: Output<Int>? = null,
    public val bloomFilterColumns: Output<List<String>>? = null,
    public val bloomFilterFalsePositiveProbability: Output<Double>? = null,
    public val compression: Output<String>? = null,
    public val dictionaryKeyThreshold: Output<Double>? = null,
    public val enablePadding: Output<Boolean>? = null,
    public val formatVersion: Output<String>? = null,
    public val paddingTolerance: Output<Double>? = null,
    public val rowIndexStride: Output<Int>? = null,
    public val stripeSizeBytes: Output<Int>? = null,
) : ConvertibleToJava<com.pulumi.awsnative.kinesisfirehose.inputs.DeliveryStreamOrcSerDeArgs> {
    override fun toJava(): com.pulumi.awsnative.kinesisfirehose.inputs.DeliveryStreamOrcSerDeArgs =
        com.pulumi.awsnative.kinesisfirehose.inputs.DeliveryStreamOrcSerDeArgs.builder()
            .blockSizeBytes(blockSizeBytes?.applyValue({ args0 -> args0 }))
            .bloomFilterColumns(bloomFilterColumns?.applyValue({ args0 -> args0.map({ args0 -> args0 }) }))
            .bloomFilterFalsePositiveProbability(
                bloomFilterFalsePositiveProbability?.applyValue({ args0 ->
                    args0
                }),
            )
            .compression(compression?.applyValue({ args0 -> args0 }))
            .dictionaryKeyThreshold(dictionaryKeyThreshold?.applyValue({ args0 -> args0 }))
            .enablePadding(enablePadding?.applyValue({ args0 -> args0 }))
            .formatVersion(formatVersion?.applyValue({ args0 -> args0 }))
            .paddingTolerance(paddingTolerance?.applyValue({ args0 -> args0 }))
            .rowIndexStride(rowIndexStride?.applyValue({ args0 -> args0 }))
            .stripeSizeBytes(stripeSizeBytes?.applyValue({ args0 -> args0 })).build()
}
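
// Usage sketch (not part of the generated source): one plausible way to construct these
// args directly with the data class constructor and `Output.of`, both defined in this file.
// The concrete values below are illustrative assumptions, not defaults enforced here.
//
//     val orcSerDe = DeliveryStreamOrcSerDeArgs(
//         compression = Output.of("SNAPPY"),
//         blockSizeBytes = Output.of(268435456),   // 256 MiB, the documented default
//         stripeSizeBytes = Output.of(67108864),   // 64 MiB, the documented default
//         enablePadding = Output.of(true),
//         paddingTolerance = Output.of(0.05),      // 5 percent of stripe size
//         bloomFilterColumns = Output.of(listOf("customer_id")),
//     )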

/**
 * Builder for [DeliveryStreamOrcSerDeArgs].
 */
@PulumiTagMarker
public class DeliveryStreamOrcSerDeArgsBuilder internal constructor() {
    private var blockSizeBytes: Output<Int>? = null

    private var bloomFilterColumns: Output<List<String>>? = null

    private var bloomFilterFalsePositiveProbability: Output<Double>? = null

    private var compression: Output<String>? = null

    private var dictionaryKeyThreshold: Output<Double>? = null

    private var enablePadding: Output<Boolean>? = null

    private var formatVersion: Output<String>? = null

    private var paddingTolerance: Output<Double>? = null

    private var rowIndexStride: Output<Int>? = null

    private var stripeSizeBytes: Output<Int>? = null

    /**
     * @param value The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Firehose uses this value for padding calculations.
     */
    @JvmName("rkwdahoecfnfjqyu")
    public suspend fun blockSizeBytes(`value`: Output<Int>) {
        this.blockSizeBytes = value
    }

    /**
     * @param value The column names for which you want Firehose to create bloom filters. The default is `null` .
     */
    @JvmName("ermuitsnnxmkwxva")
    public suspend fun bloomFilterColumns(`value`: Output<List<String>>) {
        this.bloomFilterColumns = value
    }

    @JvmName("smtyneforpukiotc")
    public suspend fun bloomFilterColumns(vararg values: Output) {
        this.bloomFilterColumns = Output.all(values.asList())
    }

    /**
     * @param values The column names for which you want Firehose to create bloom filters. The default is `null` .
     */
    @JvmName("vdprdykjsubfncnf")
    public suspend fun bloomFilterColumns(values: List<Output<String>>) {
        this.bloomFilterColumns = Output.all(values)
    }

    /**
     * @param value The Bloom filter false positive probability (FPP). The lower the FPP, the bigger the Bloom filter. The default value is 0.05, the minimum is 0, and the maximum is 1.
     */
    @JvmName("gkncsqgipaobleoe")
    public suspend fun bloomFilterFalsePositiveProbability(`value`: Output<Double>) {
        this.bloomFilterFalsePositiveProbability = value
    }

    /**
     * @param value The compression code to use over data blocks. The default is `SNAPPY` .
     */
    @JvmName("awdasmertgoxjujl")
    public suspend fun compression(`value`: Output<String>) {
        this.compression = value
    }

    /**
     * @param value Represents the fraction of the total number of non-null rows. To turn off dictionary encoding, set this fraction to a number that is less than the number of distinct keys in a dictionary. To always use dictionary encoding, set this threshold to 1.
     */
    @JvmName("hgxakjjjtokplaoi")
    public suspend fun dictionaryKeyThreshold(`value`: Output<Double>) {
        this.dictionaryKeyThreshold = value
    }

    /**
     * @param value Set this to `true` to indicate that you want stripes to be padded to the HDFS block boundaries. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is `false` .
     */
    @JvmName("euygiyfoxbmjakex")
    public suspend fun enablePadding(`value`: Output<Boolean>) {
        this.enablePadding = value
    }

    /**
     * @param value The version of the file to write. The possible values are `V0_11` and `V0_12` . The default is `V0_12` .
     */
    @JvmName("ndfyafxlxpltbohp")
    public suspend fun formatVersion(`value`: Output<String>) {
        this.formatVersion = value
    }

    /**
     * @param value A number between 0 and 1 that defines the tolerance for block padding as a decimal fraction of stripe size. The default value is 0.05, which means 5 percent of stripe size.
     * For the default values of 64 MiB ORC stripes and 256 MiB HDFS blocks, the default block padding tolerance of 5 percent reserves a maximum of 3.2 MiB for padding within the 256 MiB block. In such a case, if the available size within the block is more than 3.2 MiB, a new, smaller stripe is inserted to fit within that space. This ensures that no stripe crosses block boundaries and causes remote reads within a node-local task.
     * Kinesis Data Firehose ignores this parameter when `EnablePadding` is `false` .
     */
    @JvmName("gdhgulxigyffsutf")
    public suspend fun paddingTolerance(`value`: Output<Double>) {
        this.paddingTolerance = value
    }

    /**
     * @param value The number of rows between index entries. The default is 10,000 and the minimum is 1,000.
     */
    @JvmName("shugmmcfhegllqsi")
    public suspend fun rowIndexStride(`value`: Output<Int>) {
        this.rowIndexStride = value
    }

    /**
     * @param value The number of bytes in each stripe. The default is 64 MiB and the minimum is 8 MiB.
     */
    @JvmName("bssaobhshhrolfvv")
    public suspend fun stripeSizeBytes(`value`: Output<Int>) {
        this.stripeSizeBytes = value
    }

    /**
     * @param value The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Firehose uses this value for padding calculations.
     */
    @JvmName("xnxfgpmljbhkhqik")
    public suspend fun blockSizeBytes(`value`: Int?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.blockSizeBytes = mapped
    }

    /**
     * @param value The column names for which you want Firehose to create bloom filters. The default is `null` .
     */
    @JvmName("icduhfdfqrirbuxy")
    public suspend fun bloomFilterColumns(`value`: List<String>?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.bloomFilterColumns = mapped
    }

    /**
     * @param values The column names for which you want Firehose to create bloom filters. The default is `null` .
     */
    @JvmName("xqxcllamblbptxws")
    public suspend fun bloomFilterColumns(vararg values: String) {
        val toBeMapped = values.toList()
        val mapped = toBeMapped.let({ args0 -> of(args0) })
        this.bloomFilterColumns = mapped
    }

    /**
     * @param value The Bloom filter false positive probability (FPP). The lower the FPP, the bigger the Bloom filter. The default value is 0.05, the minimum is 0, and the maximum is 1.
     */
    @JvmName("xkbmbgbkvnpvwyfr")
    public suspend fun bloomFilterFalsePositiveProbability(`value`: Double?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.bloomFilterFalsePositiveProbability = mapped
    }

    /**
     * @param value The compression code to use over data blocks. The default is `SNAPPY` .
     */
    @JvmName("vnoercaterajhjft")
    public suspend fun compression(`value`: String?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.compression = mapped
    }

    /**
     * @param value Represents the fraction of the total number of non-null rows. To turn off dictionary encoding, set this fraction to a number that is less than the number of distinct keys in a dictionary. To always use dictionary encoding, set this threshold to 1.
     */
    @JvmName("fjbyebaggnggtxlg")
    public suspend fun dictionaryKeyThreshold(`value`: Double?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.dictionaryKeyThreshold = mapped
    }

    /**
     * @param value Set this to `true` to indicate that you want stripes to be padded to the HDFS block boundaries. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is `false` .
     */
    @JvmName("uhsupcixekurwmsn")
    public suspend fun enablePadding(`value`: Boolean?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.enablePadding = mapped
    }

    /**
     * @param value The version of the file to write. The possible values are `V0_11` and `V0_12` . The default is `V0_12` .
     */
    @JvmName("dugdukmoycxynxby")
    public suspend fun formatVersion(`value`: String?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.formatVersion = mapped
    }

    /**
     * @param value A number between 0 and 1 that defines the tolerance for block padding as a decimal fraction of stripe size. The default value is 0.05, which means 5 percent of stripe size.
     * For the default values of 64 MiB ORC stripes and 256 MiB HDFS blocks, the default block padding tolerance of 5 percent reserves a maximum of 3.2 MiB for padding within the 256 MiB block. In such a case, if the available size within the block is more than 3.2 MiB, a new, smaller stripe is inserted to fit within that space. This ensures that no stripe crosses block boundaries and causes remote reads within a node-local task.
     * Kinesis Data Firehose ignores this parameter when `EnablePadding` is `false` .
     */
    @JvmName("ilqapyshsmjlhhkk")
    public suspend fun paddingTolerance(`value`: Double?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.paddingTolerance = mapped
    }

    /**
     * @param value The number of rows between index entries. The default is 10,000 and the minimum is 1,000.
     */
    @JvmName("bjdgdrtwrfpkpuvi")
    public suspend fun rowIndexStride(`value`: Int?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.rowIndexStride = mapped
    }

    /**
     * @param value The number of bytes in each stripe. The default is 64 MiB and the minimum is 8 MiB.
     */
    @JvmName("jrmidpckgmhidcgh")
    public suspend fun stripeSizeBytes(`value`: Int?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.stripeSizeBytes = mapped
    }

    internal fun build(): DeliveryStreamOrcSerDeArgs = DeliveryStreamOrcSerDeArgs(
        blockSizeBytes = blockSizeBytes,
        bloomFilterColumns = bloomFilterColumns,
        bloomFilterFalsePositiveProbability = bloomFilterFalsePositiveProbability,
        compression = compression,
        dictionaryKeyThreshold = dictionaryKeyThreshold,
        enablePadding = enablePadding,
        formatVersion = formatVersion,
        paddingTolerance = paddingTolerance,
        rowIndexStride = rowIndexStride,
        stripeSizeBytes = stripeSizeBytes,
    )
}
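
// Builder usage sketch (not part of the generated source): the builder's constructor is
// internal, so user code normally reaches it through a parent args DSL block generated by
// the Pulumi Kotlin SDK. The enclosing `orcSerDe { ... }` receiver below is an assumption
// about that surrounding DSL; the setter calls themselves are the overloads defined above.
//
//     orcSerDe {
//         compression("SNAPPY")
//         enablePadding(true)
//         paddingTolerance(0.05)
//         bloomFilterColumns("customer_id", "order_id")
//         rowIndexStride(10000)
//     }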



