// com.pulumi.googlenative.dataproc.v1beta2.kotlin.outputs.ClusterConfigResponse.kt (Maven / Gradle / Ivy)
@file:Suppress("NAME_SHADOWING", "DEPRECATION")
package com.pulumi.googlenative.dataproc.v1beta2.kotlin.outputs
import kotlin.String
import kotlin.Suppress
import kotlin.collections.List
/**
* The cluster config.
* @property autoscalingConfig Optional. Autoscaling config for the policy associated with the cluster. Cluster does not autoscale if this field is unset.
* @property configBucket Optional. A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see Dataproc staging bucket (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). This field requires a Cloud Storage bucket name, not a URI to a Cloud Storage bucket.
* @property encryptionConfig Optional. Encryption settings for the cluster.
* @property endpointConfig Optional. Port/endpoint configuration for this cluster
* @property gceClusterConfig Optional. The shared Compute Engine config settings for all instances in a cluster.
* @property gkeClusterConfig Optional. The Kubernetes Engine config for Dataproc clusters deployed to Kubernetes. Setting this is considered mutually exclusive with Compute Engine-based options such as gce_cluster_config, master_config, worker_config, secondary_worker_config, and autoscaling_config.
* @property initializationActions Optional. Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. You can test a node's role metadata to run an executable on a master or worker node, as shown below using curl (you can also use wget): ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role) if [[ "${ROLE}" == 'Master' ]]; then ... master specific actions ... else ... worker specific actions ... fi
* @property lifecycleConfig Optional. The config setting for auto delete cluster schedule.
* @property masterConfig Optional. The Compute Engine config settings for the master instance in a cluster.
* @property metastoreConfig Optional. Metastore configuration.
* @property secondaryWorkerConfig Optional. The Compute Engine config settings for additional worker instances in a cluster.
* @property securityConfig Optional. Security related configuration.
* @property softwareConfig Optional. The config settings for software inside the cluster.
* @property tempBucket Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. If you do not specify a temp bucket, Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's temp bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket. The default bucket has a TTL of 90 days, but you can use any TTL (or none) if you specify a bucket. This field requires a Cloud Storage bucket name, not a URI to a Cloud Storage bucket.
* @property workerConfig Optional. The Compute Engine config settings for worker instances in a cluster.
*/
public data class ClusterConfigResponse(
    public val autoscalingConfig: AutoscalingConfigResponse,
    public val configBucket: String,
    public val encryptionConfig: EncryptionConfigResponse,
    public val endpointConfig: EndpointConfigResponse,
    public val gceClusterConfig: GceClusterConfigResponse,
    public val gkeClusterConfig: GkeClusterConfigResponse,
    // Fixed: was a raw `List` (missing type argument, which Kotlin rejects at
    // compile time); element type follows from the mapping in toKotlin below.
    public val initializationActions: List<NodeInitializationActionResponse>,
    public val lifecycleConfig: LifecycleConfigResponse,
    public val masterConfig: InstanceGroupConfigResponse,
    public val metastoreConfig: MetastoreConfigResponse,
    public val secondaryWorkerConfig: InstanceGroupConfigResponse,
    public val securityConfig: SecurityConfigResponse,
    public val softwareConfig: SoftwareConfigResponse,
    public val tempBucket: String,
    public val workerConfig: InstanceGroupConfigResponse,
) {
    public companion object {
        /**
         * Converts the Java-SDK representation produced by the underlying
         * provider into this Kotlin output type, recursively mapping every
         * nested config object through its own `toKotlin` converter.
         *
         * @param javaType the Java SDK `ClusterConfigResponse` to convert.
         * @return an equivalent Kotlin [ClusterConfigResponse].
         */
        public fun toKotlin(javaType: com.pulumi.googlenative.dataproc.v1beta2.outputs.ClusterConfigResponse): ClusterConfigResponse = ClusterConfigResponse(
            autoscalingConfig = javaType.autoscalingConfig().let { args0 ->
                com.pulumi.googlenative.dataproc.v1beta2.kotlin.outputs.AutoscalingConfigResponse.Companion.toKotlin(args0)
            },
            // Plain strings need no conversion.
            configBucket = javaType.configBucket(),
            encryptionConfig = javaType.encryptionConfig().let { args0 ->
                com.pulumi.googlenative.dataproc.v1beta2.kotlin.outputs.EncryptionConfigResponse.Companion.toKotlin(args0)
            },
            endpointConfig = javaType.endpointConfig().let { args0 ->
                com.pulumi.googlenative.dataproc.v1beta2.kotlin.outputs.EndpointConfigResponse.Companion.toKotlin(args0)
            },
            gceClusterConfig = javaType.gceClusterConfig().let { args0 ->
                com.pulumi.googlenative.dataproc.v1beta2.kotlin.outputs.GceClusterConfigResponse.Companion.toKotlin(args0)
            },
            gkeClusterConfig = javaType.gkeClusterConfig().let { args0 ->
                com.pulumi.googlenative.dataproc.v1beta2.kotlin.outputs.GkeClusterConfigResponse.Companion.toKotlin(args0)
            },
            // Each list element is converted individually.
            initializationActions = javaType.initializationActions().map { args0 ->
                com.pulumi.googlenative.dataproc.v1beta2.kotlin.outputs.NodeInitializationActionResponse.Companion.toKotlin(args0)
            },
            lifecycleConfig = javaType.lifecycleConfig().let { args0 ->
                com.pulumi.googlenative.dataproc.v1beta2.kotlin.outputs.LifecycleConfigResponse.Companion.toKotlin(args0)
            },
            masterConfig = javaType.masterConfig().let { args0 ->
                com.pulumi.googlenative.dataproc.v1beta2.kotlin.outputs.InstanceGroupConfigResponse.Companion.toKotlin(args0)
            },
            metastoreConfig = javaType.metastoreConfig().let { args0 ->
                com.pulumi.googlenative.dataproc.v1beta2.kotlin.outputs.MetastoreConfigResponse.Companion.toKotlin(args0)
            },
            secondaryWorkerConfig = javaType.secondaryWorkerConfig().let { args0 ->
                com.pulumi.googlenative.dataproc.v1beta2.kotlin.outputs.InstanceGroupConfigResponse.Companion.toKotlin(args0)
            },
            securityConfig = javaType.securityConfig().let { args0 ->
                com.pulumi.googlenative.dataproc.v1beta2.kotlin.outputs.SecurityConfigResponse.Companion.toKotlin(args0)
            },
            softwareConfig = javaType.softwareConfig().let { args0 ->
                com.pulumi.googlenative.dataproc.v1beta2.kotlin.outputs.SoftwareConfigResponse.Companion.toKotlin(args0)
            },
            tempBucket = javaType.tempBucket(),
            workerConfig = javaType.workerConfig().let { args0 ->
                com.pulumi.googlenative.dataproc.v1beta2.kotlin.outputs.InstanceGroupConfigResponse.Companion.toKotlin(args0)
            },
        )
    }
}
// © 2015 - 2025 Weber Informatics LLC | Privacy Policy