commonMain.aws.sdk.kotlin.services.firehose.model.ParquetSerDe.kt Maven / Gradle / Ivy
Go to download
Show more of this group Show more artifacts with this name
Show all versions of firehose-jvm Show documentation
The AWS SDK for Kotlin client for Firehose
// Code generated by smithy-kotlin-codegen. DO NOT EDIT!
package aws.sdk.kotlin.services.firehose.model
import aws.smithy.kotlin.runtime.SdkDsl
/**
* A serializer to use for converting data to the Parquet format before storing it in Amazon S3. For more information, see [Apache Parquet](https://parquet.apache.org/docs/).
*/
/**
 * A serializer used to convert data to the Parquet format before it is stored in Amazon S3.
 * For more information, see [Apache Parquet](https://parquet.apache.org/docs/).
 *
 * Instances are immutable; construct one via [invoke] or derive a modified copy via [copy].
 */
public class ParquetSerDe private constructor(b: Builder) {
    /**
     * The Hadoop Distributed File System (HDFS) block size. Useful if you intend to copy the data
     * from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB.
     * Firehose uses this value for padding calculations.
     */
    public val blockSizeBytes: Int? = b.blockSizeBytes

    /**
     * The compression code to use over data blocks. Possible values are `UNCOMPRESSED`, `SNAPPY`,
     * and `GZIP`; the default is `SNAPPY`. Use `SNAPPY` for higher decompression speed, or `GZIP`
     * when compression ratio matters more than speed.
     */
    public val compression: ParquetCompression? = b.compression

    /**
     * Indicates whether to enable dictionary compression.
     */
    public val enableDictionaryCompression: Boolean? = b.enableDictionaryCompression

    /**
     * The maximum amount of padding to apply. Useful if you intend to copy the data from Amazon S3
     * to HDFS before querying. The default is 0.
     */
    public val maxPaddingBytes: Int? = b.maxPaddingBytes

    /**
     * The Parquet page size. Column chunks are divided into pages; a page is conceptually an
     * indivisible unit in terms of compression and encoding. The minimum value is 64 KiB and the
     * default is 1 MiB.
     */
    public val pageSizeBytes: Int? = b.pageSizeBytes

    /**
     * Indicates the version of row format to output. Possible values are `V1` and `V2`; the
     * default is `V1`.
     */
    public val writerVersion: ParquetWriterVersion? = b.writerVersion

    // Render every field as "name=value" (null prints as "null"), comma-separated, no spaces —
    // byte-identical to the codegen layout.
    override fun toString(): String = listOf(
        "blockSizeBytes=$blockSizeBytes",
        "compression=$compression",
        "enableDictionaryCompression=$enableDictionaryCompression",
        "maxPaddingBytes=$maxPaddingBytes",
        "pageSizeBytes=$pageSizeBytes",
        "writerVersion=$writerVersion",
    ).joinToString(separator = ",", prefix = "ParquetSerDe(", postfix = ")")

    // Standard 31-multiplier accumulation over the fields in declaration order; null contributes 0.
    // (fold from 0 makes the first field's hash the initial result, matching the generated form.)
    override fun hashCode(): Int = listOf(
        blockSizeBytes,
        compression,
        enableDictionaryCompression,
        maxPaddingBytes,
        pageSizeBytes,
        writerVersion,
    ).fold(0) { acc, field -> 31 * acc + (field?.hashCode() ?: 0) }

    // Structural equality over all fields. The class is final, so a safe cast is equivalent to
    // the exact-class check.
    override fun equals(other: Any?): Boolean {
        if (this === other) return true
        val that = other as? ParquetSerDe ?: return false
        return blockSizeBytes == that.blockSizeBytes &&
            compression == that.compression &&
            enableDictionaryCompression == that.enableDictionaryCompression &&
            maxPaddingBytes == that.maxPaddingBytes &&
            pageSizeBytes == that.pageSizeBytes &&
            writerVersion == that.writerVersion
    }

    /**
     * Returns a copy of this value with any modifications applied via [block].
     */
    public inline fun copy(block: Builder.() -> Unit = {}): ParquetSerDe = Builder(this).apply(block).build()

    /**
     * DSL builder for [ParquetSerDe]. All fields are optional and default to `null`.
     */
    @SdkDsl
    public class Builder {
        /**
         * The Hadoop Distributed File System (HDFS) block size. Useful if you intend to copy the
         * data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is
         * 64 MiB. Firehose uses this value for padding calculations.
         */
        public var blockSizeBytes: Int? = null

        /**
         * The compression code to use over data blocks. Possible values are `UNCOMPRESSED`,
         * `SNAPPY`, and `GZIP`; the default is `SNAPPY`. Use `SNAPPY` for higher decompression
         * speed, or `GZIP` when compression ratio matters more than speed.
         */
        public var compression: ParquetCompression? = null

        /**
         * Indicates whether to enable dictionary compression.
         */
        public var enableDictionaryCompression: Boolean? = null

        /**
         * The maximum amount of padding to apply. Useful if you intend to copy the data from
         * Amazon S3 to HDFS before querying. The default is 0.
         */
        public var maxPaddingBytes: Int? = null

        /**
         * The Parquet page size. Column chunks are divided into pages; a page is conceptually an
         * indivisible unit in terms of compression and encoding. The minimum value is 64 KiB and
         * the default is 1 MiB.
         */
        public var pageSizeBytes: Int? = null

        /**
         * Indicates the version of row format to output. Possible values are `V1` and `V2`; the
         * default is `V1`.
         */
        public var writerVersion: ParquetWriterVersion? = null

        @PublishedApi
        internal constructor()

        // Seed the builder from an existing value so copy() starts from its current state.
        @PublishedApi
        internal constructor(src: ParquetSerDe) : this() {
            blockSizeBytes = src.blockSizeBytes
            compression = src.compression
            enableDictionaryCompression = src.enableDictionaryCompression
            maxPaddingBytes = src.maxPaddingBytes
            pageSizeBytes = src.pageSizeBytes
            writerVersion = src.writerVersion
        }

        @PublishedApi
        internal fun build(): ParquetSerDe = ParquetSerDe(this)

        // No server-side error corrections apply to this shape; kept for codegen uniformity.
        internal fun correctErrors(): Builder = this
    }

    public companion object {
        /**
         * Builds a [ParquetSerDe] from the supplied DSL [block].
         */
        public operator fun invoke(block: Builder.() -> Unit): ParquetSerDe = Builder().apply(block).build()
    }
}