// Build cloud applications and infrastructure by combining the safety and reliability of
// infrastructure as code with the power of the Kotlin programming language.
@file:Suppress("NAME_SHADOWING", "DEPRECATION")
package com.pulumi.gcp.dataproc.kotlin.inputs
import com.pulumi.core.Output
import com.pulumi.core.Output.of
import com.pulumi.gcp.dataproc.inputs.WorkflowTemplatePlacementManagedClusterConfigArgs.builder
import com.pulumi.kotlin.ConvertibleToJava
import com.pulumi.kotlin.PulumiTagMarker
import com.pulumi.kotlin.applySuspend
import kotlin.String
import kotlin.Suppress
import kotlin.Unit
import kotlin.collections.List
import kotlin.jvm.JvmName
/**
*
* @property autoscalingConfig Autoscaling config for the policy associated with the cluster. Cluster does not autoscale if this field is unset.
* @property encryptionConfig Encryption settings for the cluster.
* @property endpointConfig Port/endpoint configuration for this cluster
* @property gceClusterConfig The shared Compute Engine config settings for all instances in a cluster.
* @property gkeClusterConfig The Kubernetes Engine config for Dataproc clusters deployed to Kubernetes. Setting this is considered mutually exclusive with Compute Engine-based options such as `gce_cluster_config`, `master_config`, `worker_config`, `secondary_worker_config`, and `autoscaling_config`.
 * @property initializationActions Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. You can test a node's `role` metadata to run an executable on a master or worker node, as shown below using `curl` (you can also use `wget`): ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) if [[ "${ROLE}" == 'Master' ]]; then ... master specific actions ... else ... worker specific actions ... fi
* @property lifecycleConfig Lifecycle setting for the cluster.
 * @property masterConfig The Compute Engine config settings for the cluster's master instance.
* @property metastoreConfig Metastore configuration.
* @property secondaryWorkerConfig The Compute Engine config settings for additional worker instances in a cluster.
* @property securityConfig Security settings for the cluster.
* @property softwareConfig The config settings for software inside the cluster.
* @property stagingBucket A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)).
* @property tempBucket A Cloud Storage bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. If you do not specify a temp bucket, Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's temp bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket. The default bucket has a TTL of 90 days, but you can use any TTL (or none) if you specify a bucket.
* @property workerConfig The Compute Engine config settings for additional worker instances in a cluster.
* - - -
*/
public data class WorkflowTemplatePlacementManagedClusterConfigArgs(
    public val autoscalingConfig: Output<WorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigArgs>? = null,
    public val encryptionConfig: Output<WorkflowTemplatePlacementManagedClusterConfigEncryptionConfigArgs>? = null,
    public val endpointConfig: Output<WorkflowTemplatePlacementManagedClusterConfigEndpointConfigArgs>? = null,
    public val gceClusterConfig: Output<WorkflowTemplatePlacementManagedClusterConfigGceClusterConfigArgs>? = null,
    public val gkeClusterConfig: Output<WorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigArgs>? = null,
    public val initializationActions: Output<List<WorkflowTemplatePlacementManagedClusterConfigInitializationActionArgs>>? = null,
    public val lifecycleConfig: Output<WorkflowTemplatePlacementManagedClusterConfigLifecycleConfigArgs>? = null,
    public val masterConfig: Output<WorkflowTemplatePlacementManagedClusterConfigMasterConfigArgs>? = null,
    public val metastoreConfig: Output<WorkflowTemplatePlacementManagedClusterConfigMetastoreConfigArgs>? = null,
    public val secondaryWorkerConfig: Output<WorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigArgs>? = null,
    public val securityConfig: Output<WorkflowTemplatePlacementManagedClusterConfigSecurityConfigArgs>? = null,
    public val softwareConfig: Output<WorkflowTemplatePlacementManagedClusterConfigSoftwareConfigArgs>? = null,
    public val stagingBucket: Output<String>? = null,
    public val tempBucket: Output<String>? = null,
    public val workerConfig: Output<WorkflowTemplatePlacementManagedClusterConfigWorkerConfigArgs>? = null,
) : ConvertibleToJava<com.pulumi.gcp.dataproc.inputs.WorkflowTemplatePlacementManagedClusterConfigArgs> {
    /**
     * Converts this Kotlin-friendly args wrapper into the underlying Java SDK
     * args object, recursively converting every nested `*Args` value via its
     * own `toJava()` and leaving unset (null) outputs unset on the builder.
     */
    override fun toJava(): com.pulumi.gcp.dataproc.inputs.WorkflowTemplatePlacementManagedClusterConfigArgs =
        com.pulumi.gcp.dataproc.inputs.WorkflowTemplatePlacementManagedClusterConfigArgs.builder()
            .autoscalingConfig(autoscalingConfig?.applyValue({ args0 -> args0.let({ args0 -> args0.toJava() }) }))
            .encryptionConfig(encryptionConfig?.applyValue({ args0 -> args0.let({ args0 -> args0.toJava() }) }))
            .endpointConfig(endpointConfig?.applyValue({ args0 -> args0.let({ args0 -> args0.toJava() }) }))
            .gceClusterConfig(gceClusterConfig?.applyValue({ args0 -> args0.let({ args0 -> args0.toJava() }) }))
            .gkeClusterConfig(gkeClusterConfig?.applyValue({ args0 -> args0.let({ args0 -> args0.toJava() }) }))
            .initializationActions(
                initializationActions?.applyValue({ args0 ->
                    args0.map({ args0 ->
                        args0.let({ args0 -> args0.toJava() })
                    })
                }),
            )
            .lifecycleConfig(lifecycleConfig?.applyValue({ args0 -> args0.let({ args0 -> args0.toJava() }) }))
            .masterConfig(masterConfig?.applyValue({ args0 -> args0.let({ args0 -> args0.toJava() }) }))
            .metastoreConfig(metastoreConfig?.applyValue({ args0 -> args0.let({ args0 -> args0.toJava() }) }))
            .secondaryWorkerConfig(
                secondaryWorkerConfig?.applyValue({ args0 ->
                    args0.let({ args0 ->
                        args0.toJava()
                    })
                }),
            )
            .securityConfig(securityConfig?.applyValue({ args0 -> args0.let({ args0 -> args0.toJava() }) }))
            .softwareConfig(softwareConfig?.applyValue({ args0 -> args0.let({ args0 -> args0.toJava() }) }))
            .stagingBucket(stagingBucket?.applyValue({ args0 -> args0 }))
            .tempBucket(tempBucket?.applyValue({ args0 -> args0 }))
            .workerConfig(workerConfig?.applyValue({ args0 -> args0.let({ args0 -> args0.toJava() }) })).build()
}
/**
* Builder for [WorkflowTemplatePlacementManagedClusterConfigArgs].
*/
@PulumiTagMarker
public class WorkflowTemplatePlacementManagedClusterConfigArgsBuilder internal constructor() {
private var autoscalingConfig:
Output? = null
private var encryptionConfig:
Output? = null
private var endpointConfig:
Output? = null
private var gceClusterConfig:
Output? = null
private var gkeClusterConfig:
Output? = null
private var initializationActions:
Output>? = null
private var lifecycleConfig:
Output? = null
private var masterConfig: Output? =
null
private var metastoreConfig:
Output? = null
private var secondaryWorkerConfig:
Output? = null
private var securityConfig:
Output? = null
private var softwareConfig:
Output? = null
private var stagingBucket: Output? = null
private var tempBucket: Output? = null
private var workerConfig: Output? =
null
/**
* @param value Autoscaling config for the policy associated with the cluster. Cluster does not autoscale if this field is unset.
*/
@JvmName("erqaaowreatitmgu")
public suspend fun autoscalingConfig(`value`: Output) {
this.autoscalingConfig = value
}
/**
* @param value Encryption settings for the cluster.
*/
@JvmName("wkjgyushtadxjoth")
public suspend fun encryptionConfig(`value`: Output) {
this.encryptionConfig = value
}
/**
* @param value Port/endpoint configuration for this cluster
*/
@JvmName("lhfibtkxydqrafyd")
public suspend fun endpointConfig(`value`: Output) {
this.endpointConfig = value
}
/**
* @param value The shared Compute Engine config settings for all instances in a cluster.
*/
@JvmName("yeuvylofwjwaylbu")
public suspend fun gceClusterConfig(`value`: Output) {
this.gceClusterConfig = value
}
/**
* @param value The Kubernetes Engine config for Dataproc clusters deployed to Kubernetes. Setting this is considered mutually exclusive with Compute Engine-based options such as `gce_cluster_config`, `master_config`, `worker_config`, `secondary_worker_config`, and `autoscaling_config`.
*/
@JvmName("nbbiqcpjqdakalub")
public suspend fun gkeClusterConfig(`value`: Output) {
this.gkeClusterConfig = value
}
/**
* @param value Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. You can test a node's `role` metadata to run an executable on a master or worker node, as shown below using `curl` (you can also use `wget`): ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) if ; then ... master specific actions ... else ... worker specific actions ... fi
*/
@JvmName("rknakvyvcvvybycn")
public suspend fun initializationActions(`value`: Output>) {
this.initializationActions = value
}
@JvmName("kkeejomgkdehmohr")
public suspend fun initializationActions(vararg values: Output) {
this.initializationActions = Output.all(values.asList())
}
/**
* @param values Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. You can test a node's `role` metadata to run an executable on a master or worker node, as shown below using `curl` (you can also use `wget`): ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) if ; then ... master specific actions ... else ... worker specific actions ... fi
*/
@JvmName("nbjgxgjshqimkeam")
public suspend fun initializationActions(values: List