// Retrieved from the official Maven repository.
// Artifact source path: commonMain/aws/sdk/kotlin/services/glue/model/SparkSql.kt (Maven / Gradle / Ivy)

// Code generated by smithy-kotlin-codegen. DO NOT EDIT!

package aws.sdk.kotlin.services.glue.model

import aws.smithy.kotlin.runtime.SdkDsl

/**
 * Specifies a transform where you enter a SQL query using Spark SQL syntax to transform the data. The output is a single `DynamicFrame`.
 */
public class SparkSql private constructor(builder: Builder) {
    /**
     * The data inputs identified by their node names. You can associate a table name with each input node to use in the SQL query. The name you choose must meet the Spark SQL naming restrictions.
     */
    public val inputs: List<kotlin.String> = requireNotNull(builder.inputs) { "A non-null value must be provided for inputs" }
    /**
     * The name of the transform node.
     */
    public val name: kotlin.String = requireNotNull(builder.name) { "A non-null value must be provided for name" }
    /**
     * Specifies the data schema for the SparkSQL transform.
     */
    public val outputSchemas: List<aws.sdk.kotlin.services.glue.model.GlueSchema>? = builder.outputSchemas
    /**
     * A list of aliases. An alias allows you to specify what name to use in the SQL for a given input. For example, you have a datasource named "MyDataSource". If you specify `From` as MyDataSource, and `Alias` as SqlName, then in your SQL you can do:
     *
     * `select * from SqlName`
     *
     * and that gets data from MyDataSource.
     */
    public val sqlAliases: List<aws.sdk.kotlin.services.glue.model.SqlAlias> = requireNotNull(builder.sqlAliases) { "A non-null value must be provided for sqlAliases" }
    /**
     * A SQL query that must use Spark SQL syntax and return a single data set.
     */
    public val sqlQuery: kotlin.String = requireNotNull(builder.sqlQuery) { "A non-null value must be provided for sqlQuery" }

    public companion object {
        public operator fun invoke(block: Builder.() -> kotlin.Unit): aws.sdk.kotlin.services.glue.model.SparkSql = Builder().apply(block).build()
    }

    override fun toString(): kotlin.String = buildString {
        append("SparkSql(")
        append("inputs=$inputs,")
        append("name=$name,")
        append("outputSchemas=$outputSchemas,")
        append("sqlAliases=$sqlAliases,")
        append("sqlQuery=$sqlQuery")
        append(")")
    }

    override fun hashCode(): kotlin.Int {
        var result = inputs.hashCode()
        result = 31 * result + (name.hashCode())
        result = 31 * result + (outputSchemas?.hashCode() ?: 0)
        result = 31 * result + (sqlAliases.hashCode())
        result = 31 * result + (sqlQuery.hashCode())
        return result
    }

    override fun equals(other: kotlin.Any?): kotlin.Boolean {
        if (this === other) return true
        if (other == null || this::class != other::class) return false

        other as SparkSql

        if (inputs != other.inputs) return false
        if (name != other.name) return false
        if (outputSchemas != other.outputSchemas) return false
        if (sqlAliases != other.sqlAliases) return false
        if (sqlQuery != other.sqlQuery) return false

        return true
    }

    public inline fun copy(block: Builder.() -> kotlin.Unit = {}): aws.sdk.kotlin.services.glue.model.SparkSql = Builder(this).apply(block).build()

    @SdkDsl
    public class Builder {
        /**
         * The data inputs identified by their node names. You can associate a table name with each input node to use in the SQL query. The name you choose must meet the Spark SQL naming restrictions.
         */
        public var inputs: List<kotlin.String>? = null
        /**
         * The name of the transform node.
         */
        public var name: kotlin.String? = null
        /**
         * Specifies the data schema for the SparkSQL transform.
         */
        public var outputSchemas: List<aws.sdk.kotlin.services.glue.model.GlueSchema>? = null
        /**
         * A list of aliases. An alias allows you to specify what name to use in the SQL for a given input. For example, you have a datasource named "MyDataSource". If you specify `From` as MyDataSource, and `Alias` as SqlName, then in your SQL you can do:
         *
         * `select * from SqlName`
         *
         * and that gets data from MyDataSource.
         */
        public var sqlAliases: List<aws.sdk.kotlin.services.glue.model.SqlAlias>? = null
        /**
         * A SQL query that must use Spark SQL syntax and return a single data set.
         */
        public var sqlQuery: kotlin.String? = null

        @PublishedApi
        internal constructor()
        @PublishedApi
        internal constructor(x: aws.sdk.kotlin.services.glue.model.SparkSql) : this() {
            this.inputs = x.inputs
            this.name = x.name
            this.outputSchemas = x.outputSchemas
            this.sqlAliases = x.sqlAliases
            this.sqlQuery = x.sqlQuery
        }

        @PublishedApi
        internal fun build(): aws.sdk.kotlin.services.glue.model.SparkSql = SparkSql(this)

        // Deserialization fallback: substitutes empty defaults for required members
        // so partially-populated wire responses still build without throwing.
        internal fun correctErrors(): Builder {
            if (inputs == null) inputs = emptyList()
            if (name == null) name = ""
            if (sqlAliases == null) sqlAliases = emptyList()
            if (sqlQuery == null) sqlQuery = ""
            return this
        }
    }
}




// © 2015 - 2024 Weber Informatics LLC | Privacy Policy