All downloads are free. Search and download functionality uses the official Maven repository.

com.pulumi.gcp.dataproc.kotlin.inputs.WorkflowTemplatePlacementManagedClusterConfigArgs.kt Maven / Gradle / Ivy

Go to download

Build cloud applications and infrastructure by combining the safety and reliability of infrastructure as code with the power of the Kotlin programming language.

There is a newer version: 8.10.0.0
Show newest version
@file:Suppress("NAME_SHADOWING", "DEPRECATION")

package com.pulumi.gcp.dataproc.kotlin.inputs

import com.pulumi.core.Output
import com.pulumi.core.Output.of
import com.pulumi.gcp.dataproc.inputs.WorkflowTemplatePlacementManagedClusterConfigArgs.builder
import com.pulumi.kotlin.ConvertibleToJava
import com.pulumi.kotlin.PulumiTagMarker
import com.pulumi.kotlin.applySuspend
import kotlin.String
import kotlin.Suppress
import kotlin.Unit
import kotlin.collections.List
import kotlin.jvm.JvmName

/**
 * Configuration of the managed cluster that a Dataproc workflow template creates for its placement.
 *
 * @property autoscalingConfig Autoscaling config for the policy associated with the cluster. Cluster does not autoscale if this field is unset.
 * @property encryptionConfig Encryption settings for the cluster.
 * @property endpointConfig Port/endpoint configuration for this cluster
 * @property gceClusterConfig The shared Compute Engine config settings for all instances in a cluster.
 * @property gkeClusterConfig The Kubernetes Engine config for Dataproc clusters deployed to Kubernetes. Setting this is considered mutually exclusive with Compute Engine-based options such as `gce_cluster_config`, `master_config`, `worker_config`, `secondary_worker_config`, and `autoscaling_config`.
 * @property initializationActions Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. You can test a node's `role` metadata to run an executable on a master or worker node, as shown below using `curl` (you can also use `wget`): ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) if [[ "${ROLE}" == 'Master' ]]; then ... master specific actions ... else ... worker specific actions ... fi
 * @property lifecycleConfig Lifecycle setting for the cluster.
 * @property masterConfig The Compute Engine config settings for the cluster's master instance.
 * @property metastoreConfig Metastore configuration.
 * @property secondaryWorkerConfig The Compute Engine config settings for additional worker instances in a cluster.
 * @property securityConfig Security settings for the cluster.
 * @property softwareConfig The config settings for software inside the cluster.
 * @property stagingBucket A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)).
 * @property tempBucket A Cloud Storage bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. If you do not specify a temp bucket, Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's temp bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket. The default bucket has a TTL of 90 days, but you can use any TTL (or none) if you specify a bucket.
 * @property workerConfig The Compute Engine config settings for worker instances in a cluster.
 * - - -
 */
public data class WorkflowTemplatePlacementManagedClusterConfigArgs(
    public val autoscalingConfig: Output<WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigArgs>? = null,
    public val encryptionConfig: Output<WorkflowTemplatePlacementManagedClusterConfigEncryptionConfigArgs>? = null,
    public val endpointConfig: Output<WorkflowTemplatePlacementManagedClusterConfigEndpointConfigArgs>? = null,
    public val gceClusterConfig: Output<WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigArgs>? = null,
    public val gkeClusterConfig: Output<WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigArgs>? = null,
    public val initializationActions: Output<List<WorkflowTemplatePlacementManagedClusterConfigInitializationActionArgs>>? = null,
    public val lifecycleConfig: Output<WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigArgs>? = null,
    public val masterConfig: Output<WorkflowTemplatePlacementManagedClusterConfigMasterConfigArgs>? =
        null,
    public val metastoreConfig: Output<WorkflowTemplatePlacementManagedClusterConfigMetastoreConfigArgs>? = null,
    public val secondaryWorkerConfig: Output<WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigArgs>? = null,
    public val securityConfig: Output<WorkflowTemplatePlacementManagedClusterConfigSecurityConfigArgs>? = null,
    public val softwareConfig: Output<WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigArgs>? = null,
    public val stagingBucket: Output<String>? = null,
    public val tempBucket: Output<String>? = null,
    public val workerConfig: Output<WorkflowTemplatePlacementManagedClusterConfigWorkerConfigArgs>? =
        null,
) :
    ConvertibleToJava<com.pulumi.gcp.dataproc.inputs.WorkflowTemplatePlacementManagedClusterConfigArgs> {
    /** Converts this Kotlin-side argument wrapper into its Java SDK counterpart. */
    override fun toJava(): com.pulumi.gcp.dataproc.inputs.WorkflowTemplatePlacementManagedClusterConfigArgs =
        com.pulumi.gcp.dataproc.inputs.WorkflowTemplatePlacementManagedClusterConfigArgs.builder()
            .autoscalingConfig(autoscalingConfig?.applyValue({ args0 -> args0.let({ args0 -> args0.toJava() }) }))
            .encryptionConfig(encryptionConfig?.applyValue({ args0 -> args0.let({ args0 -> args0.toJava() }) }))
            .endpointConfig(endpointConfig?.applyValue({ args0 -> args0.let({ args0 -> args0.toJava() }) }))
            .gceClusterConfig(gceClusterConfig?.applyValue({ args0 -> args0.let({ args0 -> args0.toJava() }) }))
            .gkeClusterConfig(gkeClusterConfig?.applyValue({ args0 -> args0.let({ args0 -> args0.toJava() }) }))
            .initializationActions(
                initializationActions?.applyValue({ args0 ->
                    args0.map({ args0 ->
                        args0.let({ args0 -> args0.toJava() })
                    })
                }),
            )
            .lifecycleConfig(lifecycleConfig?.applyValue({ args0 -> args0.let({ args0 -> args0.toJava() }) }))
            .masterConfig(masterConfig?.applyValue({ args0 -> args0.let({ args0 -> args0.toJava() }) }))
            .metastoreConfig(metastoreConfig?.applyValue({ args0 -> args0.let({ args0 -> args0.toJava() }) }))
            .secondaryWorkerConfig(
                secondaryWorkerConfig?.applyValue({ args0 ->
                    args0.let({ args0 ->
                        args0.toJava()
                    })
                }),
            )
            .securityConfig(securityConfig?.applyValue({ args0 -> args0.let({ args0 -> args0.toJava() }) }))
            .softwareConfig(softwareConfig?.applyValue({ args0 -> args0.let({ args0 -> args0.toJava() }) }))
            .stagingBucket(stagingBucket?.applyValue({ args0 -> args0 }))
            .tempBucket(tempBucket?.applyValue({ args0 -> args0 }))
            .workerConfig(workerConfig?.applyValue({ args0 -> args0.let({ args0 -> args0.toJava() }) })).build()
}

/**
 * Builder for [WorkflowTemplatePlacementManagedClusterConfigArgs].
 */
@PulumiTagMarker
public class WorkflowTemplatePlacementManagedClusterConfigArgsBuilder internal constructor() {
    private var autoscalingConfig:
        Output<WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigArgs>? = null

    private var encryptionConfig:
        Output<WorkflowTemplatePlacementManagedClusterConfigEncryptionConfigArgs>? = null

    private var endpointConfig:
        Output<WorkflowTemplatePlacementManagedClusterConfigEndpointConfigArgs>? = null

    private var gceClusterConfig:
        Output<WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigArgs>? = null

    private var gkeClusterConfig:
        Output<WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigArgs>? = null

    private var initializationActions:
        Output<List<WorkflowTemplatePlacementManagedClusterConfigInitializationActionArgs>>? = null

    private var lifecycleConfig:
        Output<WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigArgs>? = null

    private var masterConfig: Output<WorkflowTemplatePlacementManagedClusterConfigMasterConfigArgs>? =
        null

    private var metastoreConfig:
        Output<WorkflowTemplatePlacementManagedClusterConfigMetastoreConfigArgs>? = null

    private var secondaryWorkerConfig:
        Output<WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigArgs>? = null

    private var securityConfig:
        Output<WorkflowTemplatePlacementManagedClusterConfigSecurityConfigArgs>? = null

    private var softwareConfig:
        Output<WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigArgs>? = null

    private var stagingBucket: Output<String>? = null

    private var tempBucket: Output<String>? = null

    private var workerConfig: Output<WorkflowTemplatePlacementManagedClusterConfigWorkerConfigArgs>? =
        null

    /**
     * @param value Autoscaling config for the policy associated with the cluster. Cluster does not autoscale if this field is unset.
     */
    @JvmName("erqaaowreatitmgu")
    public suspend fun autoscalingConfig(`value`: Output<WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigArgs>) {
        this.autoscalingConfig = value
    }

    /**
     * @param value Encryption settings for the cluster.
     */
    @JvmName("wkjgyushtadxjoth")
    public suspend fun encryptionConfig(`value`: Output<WorkflowTemplatePlacementManagedClusterConfigEncryptionConfigArgs>) {
        this.encryptionConfig = value
    }

    /**
     * @param value Port/endpoint configuration for this cluster
     */
    @JvmName("lhfibtkxydqrafyd")
    public suspend fun endpointConfig(`value`: Output<WorkflowTemplatePlacementManagedClusterConfigEndpointConfigArgs>) {
        this.endpointConfig = value
    }

    /**
     * @param value The shared Compute Engine config settings for all instances in a cluster.
     */
    @JvmName("yeuvylofwjwaylbu")
    public suspend fun gceClusterConfig(`value`: Output<WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigArgs>) {
        this.gceClusterConfig = value
    }

    /**
     * @param value The Kubernetes Engine config for Dataproc clusters deployed to Kubernetes. Setting this is considered mutually exclusive with Compute Engine-based options such as `gce_cluster_config`, `master_config`, `worker_config`, `secondary_worker_config`, and `autoscaling_config`.
     */
    @JvmName("nbbiqcpjqdakalub")
    public suspend fun gkeClusterConfig(`value`: Output<WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigArgs>) {
        this.gkeClusterConfig = value
    }

    /**
     * @param value Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. You can test a node's `role` metadata to run an executable on a master or worker node, as shown below using `curl` (you can also use `wget`): ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) if [[ "${ROLE}" == 'Master' ]]; then ... master specific actions ... else ... worker specific actions ... fi
     */
    @JvmName("rknakvyvcvvybycn")
    public suspend fun initializationActions(`value`: Output<List<WorkflowTemplatePlacementManagedClusterConfigInitializationActionArgs>>) {
        this.initializationActions = value
    }

    @JvmName("kkeejomgkdehmohr")
    public suspend fun initializationActions(vararg values: Output<WorkflowTemplatePlacementManagedClusterConfigInitializationActionArgs>) {
        this.initializationActions = Output.all(values.asList())
    }

    /**
     * @param values Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. You can test a node's `role` metadata to run an executable on a master or worker node, as shown below using `curl` (you can also use `wget`): ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) if [[ "${ROLE}" == 'Master' ]]; then ... master specific actions ... else ... worker specific actions ... fi
     */
    @JvmName("nbjgxgjshqimkeam")
    public suspend fun initializationActions(values: List<Output<WorkflowTemplatePlacementManagedClusterConfigInitializationActionArgs>>) {
        this.initializationActions = Output.all(values)
    }

    /**
     * @param value Lifecycle setting for the cluster.
     */
    @JvmName("agmjigjjtewvkfku")
    public suspend fun lifecycleConfig(`value`: Output<WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigArgs>) {
        this.lifecycleConfig = value
    }

    /**
     * @param value The Compute Engine config settings for the cluster's master instance.
     */
    @JvmName("mxqulwgauuyejolt")
    public suspend fun masterConfig(`value`: Output<WorkflowTemplatePlacementManagedClusterConfigMasterConfigArgs>) {
        this.masterConfig = value
    }

    /**
     * @param value Metastore configuration.
     */
    @JvmName("qkugexhsohnqixfp")
    public suspend fun metastoreConfig(`value`: Output<WorkflowTemplatePlacementManagedClusterConfigMetastoreConfigArgs>) {
        this.metastoreConfig = value
    }

    /**
     * @param value The Compute Engine config settings for additional worker instances in a cluster.
     */
    @JvmName("tepbjgrqxqqsswrr")
    public suspend fun secondaryWorkerConfig(`value`: Output<WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigArgs>) {
        this.secondaryWorkerConfig = value
    }

    /**
     * @param value Security settings for the cluster.
     */
    @JvmName("tygifloxvtkbqapp")
    public suspend fun securityConfig(`value`: Output<WorkflowTemplatePlacementManagedClusterConfigSecurityConfigArgs>) {
        this.securityConfig = value
    }

    /**
     * @param value The config settings for software inside the cluster.
     */
    @JvmName("xsphkukftchinvoh")
    public suspend fun softwareConfig(`value`: Output<WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigArgs>) {
        this.softwareConfig = value
    }

    /**
     * @param value A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)).
     */
    @JvmName("hfcrxfufncbnciju")
    public suspend fun stagingBucket(`value`: Output<String>) {
        this.stagingBucket = value
    }

    /**
     * @param value A Cloud Storage bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. If you do not specify a temp bucket, Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's temp bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket. The default bucket has a TTL of 90 days, but you can use any TTL (or none) if you specify a bucket.
     */
    @JvmName("cnufjauyhxvdmnoc")
    public suspend fun tempBucket(`value`: Output<String>) {
        this.tempBucket = value
    }

    /**
     * @param value The Compute Engine config settings for worker instances in a cluster.
     * - - -
     */
    @JvmName("brlvlqijddocgeow")
    public suspend fun workerConfig(`value`: Output<WorkflowTemplatePlacementManagedClusterConfigWorkerConfigArgs>) {
        this.workerConfig = value
    }

    /**
     * @param value Autoscaling config for the policy associated with the cluster. Cluster does not autoscale if this field is unset.
     */
    @JvmName("bsggdfjfmhuojqfv")
    public suspend fun autoscalingConfig(`value`: WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigArgs?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.autoscalingConfig = mapped
    }

    /**
     * @param argument Autoscaling config for the policy associated with the cluster. Cluster does not autoscale if this field is unset.
     */
    @JvmName("npkjjtbdminnceer")
    public suspend fun autoscalingConfig(argument: suspend WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigArgsBuilder.() -> Unit) {
        val toBeMapped =
            WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigArgsBuilder().applySuspend {
                argument()
            }.build()
        val mapped = of(toBeMapped)
        this.autoscalingConfig = mapped
    }

    /**
     * @param value Encryption settings for the cluster.
     */
    @JvmName("wmlchfpiocrhnure")
    public suspend fun encryptionConfig(`value`: WorkflowTemplatePlacementManagedClusterConfigEncryptionConfigArgs?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.encryptionConfig = mapped
    }

    /**
     * @param argument Encryption settings for the cluster.
     */
    @JvmName("twsoonlyshrtmrcd")
    public suspend fun encryptionConfig(argument: suspend WorkflowTemplatePlacementManagedClusterConfigEncryptionConfigArgsBuilder.() -> Unit) {
        val toBeMapped =
            WorkflowTemplatePlacementManagedClusterConfigEncryptionConfigArgsBuilder().applySuspend {
                argument()
            }.build()
        val mapped = of(toBeMapped)
        this.encryptionConfig = mapped
    }

    /**
     * @param value Port/endpoint configuration for this cluster
     */
    @JvmName("egkbvfipmjupcjef")
    public suspend fun endpointConfig(`value`: WorkflowTemplatePlacementManagedClusterConfigEndpointConfigArgs?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.endpointConfig = mapped
    }

    /**
     * @param argument Port/endpoint configuration for this cluster
     */
    @JvmName("bpakvtannybhjxts")
    public suspend fun endpointConfig(argument: suspend WorkflowTemplatePlacementManagedClusterConfigEndpointConfigArgsBuilder.() -> Unit) {
        val toBeMapped =
            WorkflowTemplatePlacementManagedClusterConfigEndpointConfigArgsBuilder().applySuspend {
                argument()
            }.build()
        val mapped = of(toBeMapped)
        this.endpointConfig = mapped
    }

    /**
     * @param value The shared Compute Engine config settings for all instances in a cluster.
     */
    @JvmName("ipghxpfmxebtcxaw")
    public suspend fun gceClusterConfig(`value`: WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigArgs?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.gceClusterConfig = mapped
    }

    /**
     * @param argument The shared Compute Engine config settings for all instances in a cluster.
     */
    @JvmName("urlkvvpvumqvuqbh")
    public suspend fun gceClusterConfig(argument: suspend WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigArgsBuilder.() -> Unit) {
        val toBeMapped =
            WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigArgsBuilder().applySuspend {
                argument()
            }.build()
        val mapped = of(toBeMapped)
        this.gceClusterConfig = mapped
    }

    /**
     * @param value The Kubernetes Engine config for Dataproc clusters deployed to Kubernetes. Setting this is considered mutually exclusive with Compute Engine-based options such as `gce_cluster_config`, `master_config`, `worker_config`, `secondary_worker_config`, and `autoscaling_config`.
     */
    @JvmName("wpsurfxpcbpkifqe")
    public suspend fun gkeClusterConfig(`value`: WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigArgs?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.gkeClusterConfig = mapped
    }

    /**
     * @param argument The Kubernetes Engine config for Dataproc clusters deployed to Kubernetes. Setting this is considered mutually exclusive with Compute Engine-based options such as `gce_cluster_config`, `master_config`, `worker_config`, `secondary_worker_config`, and `autoscaling_config`.
     */
    @JvmName("tcepnwiqtubvwqfr")
    public suspend fun gkeClusterConfig(argument: suspend WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigArgsBuilder.() -> Unit) {
        val toBeMapped =
            WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigArgsBuilder().applySuspend {
                argument()
            }.build()
        val mapped = of(toBeMapped)
        this.gkeClusterConfig = mapped
    }

    /**
     * @param value Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. You can test a node's `role` metadata to run an executable on a master or worker node, as shown below using `curl` (you can also use `wget`): ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) if [[ "${ROLE}" == 'Master' ]]; then ... master specific actions ... else ... worker specific actions ... fi
     */
    @JvmName("ragutoxkcnovjaem")
    public suspend fun initializationActions(`value`: List<WorkflowTemplatePlacementManagedClusterConfigInitializationActionArgs>?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.initializationActions = mapped
    }

    /**
     * @param argument Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. You can test a node's `role` metadata to run an executable on a master or worker node, as shown below using `curl` (you can also use `wget`): ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) if [[ "${ROLE}" == 'Master' ]]; then ... master specific actions ... else ... worker specific actions ... fi
     */
    @JvmName("nancvygajakendwp")
    public suspend fun initializationActions(argument: List<suspend WorkflowTemplatePlacementManagedClusterConfigInitializationActionArgsBuilder.() -> Unit>) {
        val toBeMapped = argument.toList().map {
            WorkflowTemplatePlacementManagedClusterConfigInitializationActionArgsBuilder().applySuspend {
                it()
            }.build()
        }
        val mapped = of(toBeMapped)
        this.initializationActions = mapped
    }

    /**
     * @param argument Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. You can test a node's `role` metadata to run an executable on a master or worker node, as shown below using `curl` (you can also use `wget`): ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) if [[ "${ROLE}" == 'Master' ]]; then ... master specific actions ... else ... worker specific actions ... fi
     */
    @JvmName("cmutpjrilsiewkfd")
    public suspend fun initializationActions(vararg argument: suspend WorkflowTemplatePlacementManagedClusterConfigInitializationActionArgsBuilder.() -> Unit) {
        val toBeMapped = argument.toList().map {
            WorkflowTemplatePlacementManagedClusterConfigInitializationActionArgsBuilder().applySuspend {
                it()
            }.build()
        }
        val mapped = of(toBeMapped)
        this.initializationActions = mapped
    }

    /**
     * @param argument Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. You can test a node's `role` metadata to run an executable on a master or worker node, as shown below using `curl` (you can also use `wget`): ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) if [[ "${ROLE}" == 'Master' ]]; then ... master specific actions ... else ... worker specific actions ... fi
     */
    @JvmName("duvhiilmwnajwjlg")
    public suspend fun initializationActions(argument: suspend WorkflowTemplatePlacementManagedClusterConfigInitializationActionArgsBuilder.() -> Unit) {
        val toBeMapped =
            listOf(
                WorkflowTemplatePlacementManagedClusterConfigInitializationActionArgsBuilder().applySuspend
                    { argument() }.build(),
            )
        val mapped = of(toBeMapped)
        this.initializationActions = mapped
    }

    /**
     * @param values Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. You can test a node's `role` metadata to run an executable on a master or worker node, as shown below using `curl` (you can also use `wget`): ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) if [[ "${ROLE}" == 'Master' ]]; then ... master specific actions ... else ... worker specific actions ... fi
     */
    @JvmName("jtloqkqawfhmfqvr")
    public suspend fun initializationActions(vararg values: WorkflowTemplatePlacementManagedClusterConfigInitializationActionArgs) {
        val toBeMapped = values.toList()
        val mapped = toBeMapped.let({ args0 -> of(args0) })
        this.initializationActions = mapped
    }

    /**
     * @param value Lifecycle setting for the cluster.
     */
    @JvmName("arfycsmkyrpwfodj")
    public suspend fun lifecycleConfig(`value`: WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigArgs?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.lifecycleConfig = mapped
    }

    /**
     * @param argument Lifecycle setting for the cluster.
     */
    @JvmName("nchfottjvmpraudg")
    public suspend fun lifecycleConfig(argument: suspend WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigArgsBuilder.() -> Unit) {
        val toBeMapped =
            WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigArgsBuilder().applySuspend {
                argument()
            }.build()
        val mapped = of(toBeMapped)
        this.lifecycleConfig = mapped
    }

    /**
     * @param value The Compute Engine config settings for the cluster's master instance.
     */
    @JvmName("uclxadxvotyqvwrw")
    public suspend fun masterConfig(`value`: WorkflowTemplatePlacementManagedClusterConfigMasterConfigArgs?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.masterConfig = mapped
    }

    /**
     * @param argument The Compute Engine config settings for the cluster's master instance.
     */
    @JvmName("toogxoyuytmspdog")
    public suspend fun masterConfig(argument: suspend WorkflowTemplatePlacementManagedClusterConfigMasterConfigArgsBuilder.() -> Unit) {
        val toBeMapped =
            WorkflowTemplatePlacementManagedClusterConfigMasterConfigArgsBuilder().applySuspend {
                argument()
            }.build()
        val mapped = of(toBeMapped)
        this.masterConfig = mapped
    }

    /**
     * @param value Metastore configuration.
     */
    @JvmName("qnwxynkoagoviyqa")
    public suspend fun metastoreConfig(`value`: WorkflowTemplatePlacementManagedClusterConfigMetastoreConfigArgs?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.metastoreConfig = mapped
    }

    /**
     * @param argument Metastore configuration.
     */
    @JvmName("bfeawmwqariitigy")
    public suspend fun metastoreConfig(argument: suspend WorkflowTemplatePlacementManagedClusterConfigMetastoreConfigArgsBuilder.() -> Unit) {
        val toBeMapped =
            WorkflowTemplatePlacementManagedClusterConfigMetastoreConfigArgsBuilder().applySuspend {
                argument()
            }.build()
        val mapped = of(toBeMapped)
        this.metastoreConfig = mapped
    }

    /**
     * @param value The Compute Engine config settings for additional worker instances in a cluster.
     */
    @JvmName("iavjioliwrrmpaob")
    public suspend fun secondaryWorkerConfig(`value`: WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigArgs?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.secondaryWorkerConfig = mapped
    }

    /**
     * @param argument The Compute Engine config settings for additional worker instances in a cluster.
     */
    @JvmName("arxqxudxqqdtwkna")
    public suspend fun secondaryWorkerConfig(argument: suspend WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigArgsBuilder.() -> Unit) {
        val toBeMapped =
            WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigArgsBuilder().applySuspend {
                argument()
            }.build()
        val mapped = of(toBeMapped)
        this.secondaryWorkerConfig = mapped
    }

    /**
     * @param value Security settings for the cluster.
     */
    @JvmName("tqpouqmfhaqpetsp")
    public suspend fun securityConfig(`value`: WorkflowTemplatePlacementManagedClusterConfigSecurityConfigArgs?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.securityConfig = mapped
    }

    /**
     * @param argument Security settings for the cluster.
     */
    @JvmName("tkakurpkwkrsnkod")
    public suspend fun securityConfig(argument: suspend WorkflowTemplatePlacementManagedClusterConfigSecurityConfigArgsBuilder.() -> Unit) {
        val toBeMapped =
            WorkflowTemplatePlacementManagedClusterConfigSecurityConfigArgsBuilder().applySuspend {
                argument()
            }.build()
        val mapped = of(toBeMapped)
        this.securityConfig = mapped
    }

    /**
     * @param value The config settings for software inside the cluster.
     */
    @JvmName("drqcklelqpuwbcwg")
    public suspend fun softwareConfig(`value`: WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigArgs?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.softwareConfig = mapped
    }

    /**
     * @param argument The config settings for software inside the cluster.
     */
    @JvmName("tvxqemdiwdrhocqj")
    public suspend fun softwareConfig(argument: suspend WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigArgsBuilder.() -> Unit) {
        val toBeMapped =
            WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigArgsBuilder().applySuspend {
                argument()
            }.build()
        val mapped = of(toBeMapped)
        this.softwareConfig = mapped
    }

    /**
     * @param value A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)).
     */
    @JvmName("xhhqonqnayqveogb")
    public suspend fun stagingBucket(`value`: String?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.stagingBucket = mapped
    }

    /**
     * @param value A Cloud Storage bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. If you do not specify a temp bucket, Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's temp bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket. The default bucket has a TTL of 90 days, but you can use any TTL (or none) if you specify a bucket.
     */
    @JvmName("hqgyowdxkyebrxqb")
    public suspend fun tempBucket(`value`: String?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.tempBucket = mapped
    }

    /**
     * @param value The Compute Engine config settings for worker instances in a cluster.
     * - - -
     */
    @JvmName("pfrqbioyqnqyepoc")
    public suspend fun workerConfig(`value`: WorkflowTemplatePlacementManagedClusterConfigWorkerConfigArgs?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.workerConfig = mapped
    }

    /**
     * @param argument The Compute Engine config settings for worker instances in a cluster.
     * - - -
     */
    @JvmName("sjkidhynpyghkddg")
    public suspend fun workerConfig(argument: suspend WorkflowTemplatePlacementManagedClusterConfigWorkerConfigArgsBuilder.() -> Unit) {
        val toBeMapped =
            WorkflowTemplatePlacementManagedClusterConfigWorkerConfigArgsBuilder().applySuspend {
                argument()
            }.build()
        val mapped = of(toBeMapped)
        this.workerConfig = mapped
    }

    /** Assembles the accumulated values into an immutable args instance. */
    internal fun build(): WorkflowTemplatePlacementManagedClusterConfigArgs =
        WorkflowTemplatePlacementManagedClusterConfigArgs(
            autoscalingConfig = autoscalingConfig,
            encryptionConfig = encryptionConfig,
            endpointConfig = endpointConfig,
            gceClusterConfig = gceClusterConfig,
            gkeClusterConfig = gkeClusterConfig,
            initializationActions = initializationActions,
            lifecycleConfig = lifecycleConfig,
            masterConfig = masterConfig,
            metastoreConfig = metastoreConfig,
            secondaryWorkerConfig = secondaryWorkerConfig,
            securityConfig = securityConfig,
            softwareConfig = softwareConfig,
            stagingBucket = stagingBucket,
            tempBucket = tempBucket,
            workerConfig = workerConfig,
        )
}




© 2015 - 2024 Weber Informatics LLC | Privacy Policy