com.pulumi.gcp.dataproc.kotlin.ClusterArgs.kt

Build cloud applications and infrastructure by combining the safety and reliability of infrastructure as code with the power of the Kotlin programming language.
@file:Suppress("NAME_SHADOWING", "DEPRECATION")

package com.pulumi.gcp.dataproc.kotlin

import com.pulumi.core.Output
import com.pulumi.core.Output.of
import com.pulumi.gcp.dataproc.ClusterArgs.builder
import com.pulumi.gcp.dataproc.kotlin.inputs.ClusterClusterConfigArgs
import com.pulumi.gcp.dataproc.kotlin.inputs.ClusterClusterConfigArgsBuilder
import com.pulumi.gcp.dataproc.kotlin.inputs.ClusterVirtualClusterConfigArgs
import com.pulumi.gcp.dataproc.kotlin.inputs.ClusterVirtualClusterConfigArgsBuilder
import com.pulumi.kotlin.ConvertibleToJava
import com.pulumi.kotlin.PulumiTagMarker
import com.pulumi.kotlin.applySuspend
import kotlin.Pair
import kotlin.String
import kotlin.Suppress
import kotlin.Unit
import kotlin.collections.Map
import kotlin.jvm.JvmName

/**
 * Manages a Cloud Dataproc cluster resource within GCP.
 * * [API documentation](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.clusters)
 * * How-to Guides
 *     * [Official Documentation](https://cloud.google.com/dataproc/docs)
 * !> **Warning:** Due to limitations of the API, all arguments except
 * `labels`, `cluster_config.worker_config.num_instances`, and `cluster_config.preemptible_worker_config.num_instances` are non-updatable. Changes to `cluster_config.worker_config.min_num_instances` are ignored. Changing any other argument will force recreation of the
 * whole cluster!
 * ## Example Usage
 * ### Basic
 * 
 * ```typescript
 * import * as pulumi from "@pulumi/pulumi";
 * import * as gcp from "@pulumi/gcp";
 * const simplecluster = new gcp.dataproc.Cluster("simplecluster", {
 *     name: "simplecluster",
 *     region: "us-central1",
 * });
 * ```
 * ```python
 * import pulumi
 * import pulumi_gcp as gcp
 * simplecluster = gcp.dataproc.Cluster("simplecluster",
 *     name="simplecluster",
 *     region="us-central1")
 * ```
 * ```csharp
 * using System.Collections.Generic;
 * using System.Linq;
 * using Pulumi;
 * using Gcp = Pulumi.Gcp;
 * return await Deployment.RunAsync(() =>
 * {
 *     var simplecluster = new Gcp.Dataproc.Cluster("simplecluster", new()
 *     {
 *         Name = "simplecluster",
 *         Region = "us-central1",
 *     });
 * });
 * ```
 * ```go
 * package main
 * import (
 * 	"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/dataproc"
 * 	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
 * )
 * func main() {
 * 	pulumi.Run(func(ctx *pulumi.Context) error {
 * 		_, err := dataproc.NewCluster(ctx, "simplecluster", &dataproc.ClusterArgs{
 * 			Name:   pulumi.String("simplecluster"),
 * 			Region: pulumi.String("us-central1"),
 * 		})
 * 		if err != nil {
 * 			return err
 * 		}
 * 		return nil
 * 	})
 * }
 * ```
 * ```java
 * package generated_program;
 * import com.pulumi.Context;
 * import com.pulumi.Pulumi;
 * import com.pulumi.core.Output;
 * import com.pulumi.gcp.dataproc.Cluster;
 * import com.pulumi.gcp.dataproc.ClusterArgs;
 * import java.util.List;
 * import java.util.ArrayList;
 * import java.util.Map;
 * import java.io.File;
 * import java.nio.file.Files;
 * import java.nio.file.Paths;
 * public class App {
 *     public static void main(String[] args) {
 *         Pulumi.run(App::stack);
 *     }
 *     public static void stack(Context ctx) {
 *         var simplecluster = new Cluster("simplecluster", ClusterArgs.builder()
 *             .name("simplecluster")
 *             .region("us-central1")
 *             .build());
 *     }
 * }
 * ```
 * ```yaml
 * resources:
 *   simplecluster:
 *     type: gcp:dataproc:Cluster
 *     properties:
 *       name: simplecluster
 *       region: us-central1
 * ```
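 * A minimal Kotlin sketch of the same cluster, assuming the generated top-level
 * `cluster` resource function and the `Pulumi.run` entry point provided by this SDK
 * (the `args` block delegates to the `ClusterArgsBuilder` defined below):
 * ```kotlin
 * import com.pulumi.gcp.dataproc.kotlin.cluster
 * import com.pulumi.kotlin.Pulumi
 * fun main() {
 *     Pulumi.run { ctx ->
 *         val simplecluster = cluster("simplecluster") {
 *             args {
 *                 name("simplecluster")
 *                 region("us-central1")
 *             }
 *         }
 *     }
 * }
 * ```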
 * 
 * ### Advanced
 * 
 * ```typescript
 * import * as pulumi from "@pulumi/pulumi";
 * import * as gcp from "@pulumi/gcp";
 * const _default = new gcp.serviceaccount.Account("default", {
 *     accountId: "service-account-id",
 *     displayName: "Service Account",
 * });
 * const mycluster = new gcp.dataproc.Cluster("mycluster", {
 *     name: "mycluster",
 *     region: "us-central1",
 *     gracefulDecommissionTimeout: "120s",
 *     labels: {
 *         foo: "bar",
 *     },
 *     clusterConfig: {
 *         stagingBucket: "dataproc-staging-bucket",
 *         masterConfig: {
 *             numInstances: 1,
 *             machineType: "e2-medium",
 *             diskConfig: {
 *                 bootDiskType: "pd-ssd",
 *                 bootDiskSizeGb: 30,
 *             },
 *         },
 *         workerConfig: {
 *             numInstances: 2,
 *             machineType: "e2-medium",
 *             minCpuPlatform: "Intel Skylake",
 *             diskConfig: {
 *                 bootDiskSizeGb: 30,
 *                 numLocalSsds: 1,
 *             },
 *         },
 *         preemptibleWorkerConfig: {
 *             numInstances: 0,
 *         },
 *         softwareConfig: {
 *             imageVersion: "2.0.35-debian10",
 *             overrideProperties: {
 *                 "dataproc:dataproc.allow.zero.workers": "true",
 *             },
 *         },
 *         gceClusterConfig: {
 *             tags: [
 *                 "foo",
 *                 "bar",
 *             ],
 *             serviceAccount: _default.email,
 *             serviceAccountScopes: ["cloud-platform"],
 *         },
 *         initializationActions: [{
 *             script: "gs://dataproc-initialization-actions/stackdriver/stackdriver.sh",
 *             timeoutSec: 500,
 *         }],
 *     },
 * });
 * ```
 * ```python
 * import pulumi
 * import pulumi_gcp as gcp
 * default = gcp.serviceaccount.Account("default",
 *     account_id="service-account-id",
 *     display_name="Service Account")
 * mycluster = gcp.dataproc.Cluster("mycluster",
 *     name="mycluster",
 *     region="us-central1",
 *     graceful_decommission_timeout="120s",
 *     labels={
 *         "foo": "bar",
 *     },
 *     cluster_config=gcp.dataproc.ClusterClusterConfigArgs(
 *         staging_bucket="dataproc-staging-bucket",
 *         master_config=gcp.dataproc.ClusterClusterConfigMasterConfigArgs(
 *             num_instances=1,
 *             machine_type="e2-medium",
 *             disk_config=gcp.dataproc.ClusterClusterConfigMasterConfigDiskConfigArgs(
 *                 boot_disk_type="pd-ssd",
 *                 boot_disk_size_gb=30,
 *             ),
 *         ),
 *         worker_config=gcp.dataproc.ClusterClusterConfigWorkerConfigArgs(
 *             num_instances=2,
 *             machine_type="e2-medium",
 *             min_cpu_platform="Intel Skylake",
 *             disk_config=gcp.dataproc.ClusterClusterConfigWorkerConfigDiskConfigArgs(
 *                 boot_disk_size_gb=30,
 *                 num_local_ssds=1,
 *             ),
 *         ),
 *         preemptible_worker_config=gcp.dataproc.ClusterClusterConfigPreemptibleWorkerConfigArgs(
 *             num_instances=0,
 *         ),
 *         software_config=gcp.dataproc.ClusterClusterConfigSoftwareConfigArgs(
 *             image_version="2.0.35-debian10",
 *             override_properties={
 *                 "dataproc:dataproc.allow.zero.workers": "true",
 *             },
 *         ),
 *         gce_cluster_config=gcp.dataproc.ClusterClusterConfigGceClusterConfigArgs(
 *             tags=[
 *                 "foo",
 *                 "bar",
 *             ],
 *             service_account=default.email,
 *             service_account_scopes=["cloud-platform"],
 *         ),
 *         initialization_actions=[gcp.dataproc.ClusterClusterConfigInitializationActionArgs(
 *             script="gs://dataproc-initialization-actions/stackdriver/stackdriver.sh",
 *             timeout_sec=500,
 *         )],
 *     ))
 * ```
 * ```csharp
 * using System.Collections.Generic;
 * using System.Linq;
 * using Pulumi;
 * using Gcp = Pulumi.Gcp;
 * return await Deployment.RunAsync(() =>
 * {
 *     var @default = new Gcp.ServiceAccount.Account("default", new()
 *     {
 *         AccountId = "service-account-id",
 *         DisplayName = "Service Account",
 *     });
 *     var mycluster = new Gcp.Dataproc.Cluster("mycluster", new()
 *     {
 *         Name = "mycluster",
 *         Region = "us-central1",
 *         GracefulDecommissionTimeout = "120s",
 *         Labels =
 *         {
 *             { "foo", "bar" },
 *         },
 *         ClusterConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigArgs
 *         {
 *             StagingBucket = "dataproc-staging-bucket",
 *             MasterConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigMasterConfigArgs
 *             {
 *                 NumInstances = 1,
 *                 MachineType = "e2-medium",
 *                 DiskConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigMasterConfigDiskConfigArgs
 *                 {
 *                     BootDiskType = "pd-ssd",
 *                     BootDiskSizeGb = 30,
 *                 },
 *             },
 *             WorkerConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigWorkerConfigArgs
 *             {
 *                 NumInstances = 2,
 *                 MachineType = "e2-medium",
 *                 MinCpuPlatform = "Intel Skylake",
 *                 DiskConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigWorkerConfigDiskConfigArgs
 *                 {
 *                     BootDiskSizeGb = 30,
 *                     NumLocalSsds = 1,
 *                 },
 *             },
 *             PreemptibleWorkerConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigPreemptibleWorkerConfigArgs
 *             {
 *                 NumInstances = 0,
 *             },
 *             SoftwareConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigSoftwareConfigArgs
 *             {
 *                 ImageVersion = "2.0.35-debian10",
 *                 OverrideProperties =
 *                 {
 *                     { "dataproc:dataproc.allow.zero.workers", "true" },
 *                 },
 *             },
 *             GceClusterConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigGceClusterConfigArgs
 *             {
 *                 Tags = new[]
 *                 {
 *                     "foo",
 *                     "bar",
 *                 },
 *                 ServiceAccount = @default.Email,
 *                 ServiceAccountScopes = new[]
 *                 {
 *                     "cloud-platform",
 *                 },
 *             },
 *             InitializationActions = new[]
 *             {
 *                 new Gcp.Dataproc.Inputs.ClusterClusterConfigInitializationActionArgs
 *                 {
 *                     Script = "gs://dataproc-initialization-actions/stackdriver/stackdriver.sh",
 *                     TimeoutSec = 500,
 *                 },
 *             },
 *         },
 *     });
 * });
 * ```
 * ```go
 * package main
 * import (
 * 	"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/dataproc"
 * 	"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/serviceaccount"
 * 	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
 * )
 * func main() {
 * 	pulumi.Run(func(ctx *pulumi.Context) error {
 * 		_default, err := serviceaccount.NewAccount(ctx, "default", &serviceaccount.AccountArgs{
 * 			AccountId:   pulumi.String("service-account-id"),
 * 			DisplayName: pulumi.String("Service Account"),
 * 		})
 * 		if err != nil {
 * 			return err
 * 		}
 * 		_, err = dataproc.NewCluster(ctx, "mycluster", &dataproc.ClusterArgs{
 * 			Name:                        pulumi.String("mycluster"),
 * 			Region:                      pulumi.String("us-central1"),
 * 			GracefulDecommissionTimeout: pulumi.String("120s"),
 * 			Labels: pulumi.StringMap{
 * 				"foo": pulumi.String("bar"),
 * 			},
 * 			ClusterConfig: &dataproc.ClusterClusterConfigArgs{
 * 				StagingBucket: pulumi.String("dataproc-staging-bucket"),
 * 				MasterConfig: &dataproc.ClusterClusterConfigMasterConfigArgs{
 * 					NumInstances: pulumi.Int(1),
 * 					MachineType:  pulumi.String("e2-medium"),
 * 					DiskConfig: &dataproc.ClusterClusterConfigMasterConfigDiskConfigArgs{
 * 						BootDiskType:   pulumi.String("pd-ssd"),
 * 						BootDiskSizeGb: pulumi.Int(30),
 * 					},
 * 				},
 * 				WorkerConfig: &dataproc.ClusterClusterConfigWorkerConfigArgs{
 * 					NumInstances:   pulumi.Int(2),
 * 					MachineType:    pulumi.String("e2-medium"),
 * 					MinCpuPlatform: pulumi.String("Intel Skylake"),
 * 					DiskConfig: &dataproc.ClusterClusterConfigWorkerConfigDiskConfigArgs{
 * 						BootDiskSizeGb: pulumi.Int(30),
 * 						NumLocalSsds:   pulumi.Int(1),
 * 					},
 * 				},
 * 				PreemptibleWorkerConfig: &dataproc.ClusterClusterConfigPreemptibleWorkerConfigArgs{
 * 					NumInstances: pulumi.Int(0),
 * 				},
 * 				SoftwareConfig: &dataproc.ClusterClusterConfigSoftwareConfigArgs{
 * 					ImageVersion: pulumi.String("2.0.35-debian10"),
 * 					OverrideProperties: pulumi.StringMap{
 * 						"dataproc:dataproc.allow.zero.workers": pulumi.String("true"),
 * 					},
 * 				},
 * 				GceClusterConfig: &dataproc.ClusterClusterConfigGceClusterConfigArgs{
 * 					Tags: pulumi.StringArray{
 * 						pulumi.String("foo"),
 * 						pulumi.String("bar"),
 * 					},
 * 					ServiceAccount: _default.Email,
 * 					ServiceAccountScopes: pulumi.StringArray{
 * 						pulumi.String("cloud-platform"),
 * 					},
 * 				},
 * 				InitializationActions: dataproc.ClusterClusterConfigInitializationActionArray{
 * 					&dataproc.ClusterClusterConfigInitializationActionArgs{
 * 						Script:     pulumi.String("gs://dataproc-initialization-actions/stackdriver/stackdriver.sh"),
 * 						TimeoutSec: pulumi.Int(500),
 * 					},
 * 				},
 * 			},
 * 		})
 * 		if err != nil {
 * 			return err
 * 		}
 * 		return nil
 * 	})
 * }
 * ```
 * ```java
 * package generated_program;
 * import com.pulumi.Context;
 * import com.pulumi.Pulumi;
 * import com.pulumi.core.Output;
 * import com.pulumi.gcp.serviceaccount.Account;
 * import com.pulumi.gcp.serviceaccount.AccountArgs;
 * import com.pulumi.gcp.dataproc.Cluster;
 * import com.pulumi.gcp.dataproc.ClusterArgs;
 * import com.pulumi.gcp.dataproc.inputs.ClusterClusterConfigArgs;
 * import com.pulumi.gcp.dataproc.inputs.ClusterClusterConfigMasterConfigArgs;
 * import com.pulumi.gcp.dataproc.inputs.ClusterClusterConfigMasterConfigDiskConfigArgs;
 * import com.pulumi.gcp.dataproc.inputs.ClusterClusterConfigWorkerConfigArgs;
 * import com.pulumi.gcp.dataproc.inputs.ClusterClusterConfigWorkerConfigDiskConfigArgs;
 * import com.pulumi.gcp.dataproc.inputs.ClusterClusterConfigPreemptibleWorkerConfigArgs;
 * import com.pulumi.gcp.dataproc.inputs.ClusterClusterConfigSoftwareConfigArgs;
 * import com.pulumi.gcp.dataproc.inputs.ClusterClusterConfigGceClusterConfigArgs;
 * import com.pulumi.gcp.dataproc.inputs.ClusterClusterConfigInitializationActionArgs;
 * import java.util.List;
 * import java.util.ArrayList;
 * import java.util.Map;
 * import java.io.File;
 * import java.nio.file.Files;
 * import java.nio.file.Paths;
 * public class App {
 *     public static void main(String[] args) {
 *         Pulumi.run(App::stack);
 *     }
 *     public static void stack(Context ctx) {
 *         var default_ = new Account("default", AccountArgs.builder()
 *             .accountId("service-account-id")
 *             .displayName("Service Account")
 *             .build());
 *         var mycluster = new Cluster("mycluster", ClusterArgs.builder()
 *             .name("mycluster")
 *             .region("us-central1")
 *             .gracefulDecommissionTimeout("120s")
 *             .labels(Map.of("foo", "bar"))
 *             .clusterConfig(ClusterClusterConfigArgs.builder()
 *                 .stagingBucket("dataproc-staging-bucket")
 *                 .masterConfig(ClusterClusterConfigMasterConfigArgs.builder()
 *                     .numInstances(1)
 *                     .machineType("e2-medium")
 *                     .diskConfig(ClusterClusterConfigMasterConfigDiskConfigArgs.builder()
 *                         .bootDiskType("pd-ssd")
 *                         .bootDiskSizeGb(30)
 *                         .build())
 *                     .build())
 *                 .workerConfig(ClusterClusterConfigWorkerConfigArgs.builder()
 *                     .numInstances(2)
 *                     .machineType("e2-medium")
 *                     .minCpuPlatform("Intel Skylake")
 *                     .diskConfig(ClusterClusterConfigWorkerConfigDiskConfigArgs.builder()
 *                         .bootDiskSizeGb(30)
 *                         .numLocalSsds(1)
 *                         .build())
 *                     .build())
 *                 .preemptibleWorkerConfig(ClusterClusterConfigPreemptibleWorkerConfigArgs.builder()
 *                     .numInstances(0)
 *                     .build())
 *                 .softwareConfig(ClusterClusterConfigSoftwareConfigArgs.builder()
 *                     .imageVersion("2.0.35-debian10")
 *                     .overrideProperties(Map.of("dataproc:dataproc.allow.zero.workers", "true"))
 *                     .build())
 *                 .gceClusterConfig(ClusterClusterConfigGceClusterConfigArgs.builder()
 *                     .tags(
 *                         "foo",
 *                         "bar")
 *                     .serviceAccount(default_.email())
 *                     .serviceAccountScopes("cloud-platform")
 *                     .build())
 *                 .initializationActions(ClusterClusterConfigInitializationActionArgs.builder()
 *                     .script("gs://dataproc-initialization-actions/stackdriver/stackdriver.sh")
 *                     .timeoutSec(500)
 *                     .build())
 *                 .build())
 *             .build());
 *     }
 * }
 * ```
 * ```yaml
 * resources:
 *   default:
 *     type: gcp:serviceaccount:Account
 *     properties:
 *       accountId: service-account-id
 *       displayName: Service Account
 *   mycluster:
 *     type: gcp:dataproc:Cluster
 *     properties:
 *       name: mycluster
 *       region: us-central1
 *       gracefulDecommissionTimeout: 120s
 *       labels:
 *         foo: bar
 *       clusterConfig:
 *         stagingBucket: dataproc-staging-bucket
 *         masterConfig:
 *           numInstances: 1
 *           machineType: e2-medium
 *           diskConfig:
 *             bootDiskType: pd-ssd
 *             bootDiskSizeGb: 30
 *         workerConfig:
 *           numInstances: 2
 *           machineType: e2-medium
 *           minCpuPlatform: Intel Skylake
 *           diskConfig:
 *             bootDiskSizeGb: 30
 *             numLocalSsds: 1
 *         preemptibleWorkerConfig:
 *           numInstances: 0
 *         softwareConfig:
 *           imageVersion: 2.0.35-debian10
 *           overrideProperties:
 *             dataproc:dataproc.allow.zero.workers: 'true'
 *         gceClusterConfig:
 *           tags:
 *             - foo
 *             - bar
 *           serviceAccount: ${default.email}
 *           serviceAccountScopes:
 *             - cloud-platform
 *         initializationActions:
 *           - script: gs://dataproc-initialization-actions/stackdriver/stackdriver.sh
 *             timeoutSec: 500
 * ```
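 * A Kotlin sketch of the advanced configuration, under the same assumption about the
 * generated `cluster` resource function; the nested lambdas map to the suspend builder
 * overloads defined in this file (e.g. `clusterConfig { ... }`), and the inner config
 * builders are assumed to follow the same pattern:
 * ```kotlin
 * import com.pulumi.gcp.dataproc.kotlin.cluster
 * import com.pulumi.kotlin.Pulumi
 * fun main() {
 *     Pulumi.run { ctx ->
 *         val mycluster = cluster("mycluster") {
 *             args {
 *                 name("mycluster")
 *                 region("us-central1")
 *                 gracefulDecommissionTimeout("120s")
 *                 labels("foo" to "bar")
 *                 clusterConfig {
 *                     stagingBucket("dataproc-staging-bucket")
 *                     masterConfig {
 *                         numInstances(1)
 *                         machineType("e2-medium")
 *                     }
 *                     workerConfig {
 *                         numInstances(2)
 *                         machineType("e2-medium")
 *                     }
 *                 }
 *             }
 *         }
 *     }
 * }
 * ```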
 * 
 * ### Using A GPU Accelerator
 * 
 * ```typescript
 * import * as pulumi from "@pulumi/pulumi";
 * import * as gcp from "@pulumi/gcp";
 * const acceleratedCluster = new gcp.dataproc.Cluster("accelerated_cluster", {
 *     name: "my-cluster-with-gpu",
 *     region: "us-central1",
 *     clusterConfig: {
 *         gceClusterConfig: {
 *             zone: "us-central1-a",
 *         },
 *         masterConfig: {
 *             accelerators: [{
 *                 acceleratorType: "nvidia-tesla-k80",
 *                 acceleratorCount: 1,
 *             }],
 *         },
 *     },
 * });
 * ```
 * ```python
 * import pulumi
 * import pulumi_gcp as gcp
 * accelerated_cluster = gcp.dataproc.Cluster("accelerated_cluster",
 *     name="my-cluster-with-gpu",
 *     region="us-central1",
 *     cluster_config=gcp.dataproc.ClusterClusterConfigArgs(
 *         gce_cluster_config=gcp.dataproc.ClusterClusterConfigGceClusterConfigArgs(
 *             zone="us-central1-a",
 *         ),
 *         master_config=gcp.dataproc.ClusterClusterConfigMasterConfigArgs(
 *             accelerators=[gcp.dataproc.ClusterClusterConfigMasterConfigAcceleratorArgs(
 *                 accelerator_type="nvidia-tesla-k80",
 *                 accelerator_count=1,
 *             )],
 *         ),
 *     ))
 * ```
 * ```csharp
 * using System.Collections.Generic;
 * using System.Linq;
 * using Pulumi;
 * using Gcp = Pulumi.Gcp;
 * return await Deployment.RunAsync(() =>
 * {
 *     var acceleratedCluster = new Gcp.Dataproc.Cluster("accelerated_cluster", new()
 *     {
 *         Name = "my-cluster-with-gpu",
 *         Region = "us-central1",
 *         ClusterConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigArgs
 *         {
 *             GceClusterConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigGceClusterConfigArgs
 *             {
 *                 Zone = "us-central1-a",
 *             },
 *             MasterConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigMasterConfigArgs
 *             {
 *                 Accelerators = new[]
 *                 {
 *                     new Gcp.Dataproc.Inputs.ClusterClusterConfigMasterConfigAcceleratorArgs
 *                     {
 *                         AcceleratorType = "nvidia-tesla-k80",
 *                         AcceleratorCount = 1,
 *                     },
 *                 },
 *             },
 *         },
 *     });
 * });
 * ```
 * ```go
 * package main
 * import (
 * 	"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/dataproc"
 * 	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
 * )
 * func main() {
 * 	pulumi.Run(func(ctx *pulumi.Context) error {
 * 		_, err := dataproc.NewCluster(ctx, "accelerated_cluster", &dataproc.ClusterArgs{
 * 			Name:   pulumi.String("my-cluster-with-gpu"),
 * 			Region: pulumi.String("us-central1"),
 * 			ClusterConfig: &dataproc.ClusterClusterConfigArgs{
 * 				GceClusterConfig: &dataproc.ClusterClusterConfigGceClusterConfigArgs{
 * 					Zone: pulumi.String("us-central1-a"),
 * 				},
 * 				MasterConfig: &dataproc.ClusterClusterConfigMasterConfigArgs{
 * 					Accelerators: dataproc.ClusterClusterConfigMasterConfigAcceleratorArray{
 * 						&dataproc.ClusterClusterConfigMasterConfigAcceleratorArgs{
 * 							AcceleratorType:  pulumi.String("nvidia-tesla-k80"),
 * 							AcceleratorCount: pulumi.Int(1),
 * 						},
 * 					},
 * 				},
 * 			},
 * 		})
 * 		if err != nil {
 * 			return err
 * 		}
 * 		return nil
 * 	})
 * }
 * ```
 * ```java
 * package generated_program;
 * import com.pulumi.Context;
 * import com.pulumi.Pulumi;
 * import com.pulumi.core.Output;
 * import com.pulumi.gcp.dataproc.Cluster;
 * import com.pulumi.gcp.dataproc.ClusterArgs;
 * import com.pulumi.gcp.dataproc.inputs.ClusterClusterConfigArgs;
 * import com.pulumi.gcp.dataproc.inputs.ClusterClusterConfigGceClusterConfigArgs;
 * import com.pulumi.gcp.dataproc.inputs.ClusterClusterConfigMasterConfigArgs;
 * import com.pulumi.gcp.dataproc.inputs.ClusterClusterConfigMasterConfigAcceleratorArgs;
 * import java.util.List;
 * import java.util.ArrayList;
 * import java.util.Map;
 * import java.io.File;
 * import java.nio.file.Files;
 * import java.nio.file.Paths;
 * public class App {
 *     public static void main(String[] args) {
 *         Pulumi.run(App::stack);
 *     }
 *     public static void stack(Context ctx) {
 *         var acceleratedCluster = new Cluster("acceleratedCluster", ClusterArgs.builder()
 *             .name("my-cluster-with-gpu")
 *             .region("us-central1")
 *             .clusterConfig(ClusterClusterConfigArgs.builder()
 *                 .gceClusterConfig(ClusterClusterConfigGceClusterConfigArgs.builder()
 *                     .zone("us-central1-a")
 *                     .build())
 *                 .masterConfig(ClusterClusterConfigMasterConfigArgs.builder()
 *                     .accelerators(ClusterClusterConfigMasterConfigAcceleratorArgs.builder()
 *                         .acceleratorType("nvidia-tesla-k80")
 *                         .acceleratorCount(1)
 *                         .build())
 *                     .build())
 *                 .build())
 *             .build());
 *     }
 * }
 * ```
 * ```yaml
 * resources:
 *   acceleratedCluster:
 *     type: gcp:dataproc:Cluster
 *     name: accelerated_cluster
 *     properties:
 *       name: my-cluster-with-gpu
 *       region: us-central1
 *       clusterConfig:
 *         gceClusterConfig:
 *           zone: us-central1-a
 *         masterConfig:
 *           accelerators:
 *             - acceleratorType: nvidia-tesla-k80
 *               acceleratorCount: 1
 * ```
 * 
 * ## Import
 * This resource does not support import.
 * @property clusterConfig Allows you to configure various aspects of the cluster.
 * Structure defined below.
 * @property gracefulDecommissionTimeout The timeout duration that allows graceful decommissioning of worker nodes when the cluster is resized (for example, `"120s"`).
 * @property labels The labels (key/value pairs) configured on the resource and applied to instances in the cluster.
 * **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer
 * to the field `effective_labels` for all of the labels present on the resource.
 * @property name The name of the cluster, unique within the project and
 * zone.
 * - - -
 * @property project The ID of the project in which the `cluster` will exist. If it
 * is not provided, the provider project is used.
 * @property region The region in which the cluster and associated nodes will be created.
 * Defaults to `global`.
 * @property virtualClusterConfig Allows you to configure a virtual Dataproc on GKE cluster.
 * Structure defined below.
 */
public data class ClusterArgs(
    public val clusterConfig: Output<ClusterClusterConfigArgs>? = null,
    public val gracefulDecommissionTimeout: Output<String>? = null,
    public val labels: Output<Map<String, String>>? = null,
    public val name: Output<String>? = null,
    public val project: Output<String>? = null,
    public val region: Output<String>? = null,
    public val virtualClusterConfig: Output<ClusterVirtualClusterConfigArgs>? = null,
) : ConvertibleToJava<com.pulumi.gcp.dataproc.ClusterArgs> {
    override fun toJava(): com.pulumi.gcp.dataproc.ClusterArgs =
        com.pulumi.gcp.dataproc.ClusterArgs.builder()
            .clusterConfig(clusterConfig?.applyValue({ args0 -> args0.let({ args0 -> args0.toJava() }) }))
            .gracefulDecommissionTimeout(gracefulDecommissionTimeout?.applyValue({ args0 -> args0 }))
            .labels(labels?.applyValue({ args0 -> args0.map({ args0 -> args0.key.to(args0.value) }).toMap() }))
            .name(name?.applyValue({ args0 -> args0 }))
            .project(project?.applyValue({ args0 -> args0 }))
            .region(region?.applyValue({ args0 -> args0 }))
            .virtualClusterConfig(
                virtualClusterConfig?.applyValue({ args0 ->
                    args0.let({ args0 ->
                        args0.toJava()
                    })
                }),
            ).build()
}
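
/*
 * Usage sketch (illustrative; not part of the generated file): ClusterArgs can be
 * constructed directly, with each field wrapped in an Output. toJava() then converts
 * it to the underlying com.pulumi.gcp.dataproc.ClusterArgs via the Java builder above.
 *
 *     val args = ClusterArgs(
 *         name = Output.of("simplecluster"),
 *         region = Output.of("us-central1"),
 *         labels = Output.of(mapOf("env" to "dev")),
 *     )
 *     val javaArgs = args.toJava()
 */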

/**
 * Builder for [ClusterArgs].
 */
@PulumiTagMarker
public class ClusterArgsBuilder internal constructor() {
    private var clusterConfig: Output<ClusterClusterConfigArgs>? = null

    private var gracefulDecommissionTimeout: Output<String>? = null

    private var labels: Output<Map<String, String>>? = null

    private var name: Output<String>? = null

    private var project: Output<String>? = null

    private var region: Output<String>? = null

    private var virtualClusterConfig: Output<ClusterVirtualClusterConfigArgs>? = null

    /**
     * @param value Allows you to configure various aspects of the cluster.
     * Structure defined below.
     */
    @JvmName("dtjlwrygesmuydhr")
    public suspend fun clusterConfig(`value`: Output<ClusterClusterConfigArgs>) {
        this.clusterConfig = value
    }

    /**
     * @param value The timeout duration that allows graceful decommissioning of worker nodes when the cluster is resized (for example, `"120s"`).
     */
    @JvmName("opagkpqoqruyvnyo")
    public suspend fun gracefulDecommissionTimeout(`value`: Output<String>) {
        this.gracefulDecommissionTimeout = value
    }

    /**
     * @param value The labels (key/value pairs) configured on the resource and applied to instances in the cluster.
     * **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer
     * to the field `effective_labels` for all of the labels present on the resource.
     */
    @JvmName("ghdbaicmgyhmxnva")
    public suspend fun labels(`value`: Output<Map<String, String>>) {
        this.labels = value
    }

    /**
     * @param value The name of the cluster, unique within the project and
     * zone.
     * - - -
     */
    @JvmName("xenbvitprafdbmsi")
    public suspend fun name(`value`: Output<String>) {
        this.name = value
    }

    /**
     * @param value The ID of the project in which the `cluster` will exist. If it
     * is not provided, the provider project is used.
     */
    @JvmName("gulokmajutuxvnml")
    public suspend fun project(`value`: Output<String>) {
        this.project = value
    }

    /**
     * @param value The region in which the cluster and associated nodes will be created.
     * Defaults to `global`.
     */
    @JvmName("ewmmvimcpwbvxkii")
    public suspend fun region(`value`: Output<String>) {
        this.region = value
    }

    /**
     * @param value Allows you to configure a virtual Dataproc on GKE cluster.
     * Structure defined below.
     */
    @JvmName("ysjxkicejfkjiwnj")
    public suspend fun virtualClusterConfig(`value`: Output<ClusterVirtualClusterConfigArgs>) {
        this.virtualClusterConfig = value
    }

    /**
     * @param value Allows you to configure various aspects of the cluster.
     * Structure defined below.
     */
    @JvmName("hiftqsoxqulnneje")
    public suspend fun clusterConfig(`value`: ClusterClusterConfigArgs?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.clusterConfig = mapped
    }

    /**
     * @param argument Allows you to configure various aspects of the cluster.
     * Structure defined below.
     */
    @JvmName("hbircatectbndibm")
    public suspend fun clusterConfig(argument: suspend ClusterClusterConfigArgsBuilder.() -> Unit) {
        val toBeMapped = ClusterClusterConfigArgsBuilder().applySuspend { argument() }.build()
        val mapped = of(toBeMapped)
        this.clusterConfig = mapped
    }

    /**
     * @param value The timeout duration that allows graceful decommissioning of worker nodes when the cluster is resized (for example, `"120s"`).
     */
    @JvmName("xkuqswacmwoitcoy")
    public suspend fun gracefulDecommissionTimeout(`value`: String?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.gracefulDecommissionTimeout = mapped
    }

    /**
     * @param value The labels (key/value pairs) configured on the resource and applied to instances in the cluster.
     * **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer
     * to the field `effective_labels` for all of the labels present on the resource.
     */
    @JvmName("ligkerykajytywik")
    public suspend fun labels(`value`: Map<String, String>?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.labels = mapped
    }

    /**
     * @param values The labels (key/value pairs) configured on the resource and applied to instances in the cluster.
     * **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. Please refer
     * to the field `effective_labels` for all of the labels present on the resource.
     */
    @JvmName("mkpiclaiexqjuuyj")
    public fun labels(vararg values: Pair<String, String>) {
        val toBeMapped = values.toMap()
        val mapped = toBeMapped.let({ args0 -> of(args0) })
        this.labels = mapped
    }

    /**
     * @param value The name of the cluster, unique within the project and
     * zone.
     * - - -
     */
    @JvmName("kvupubcrfafcyhmx")
    public suspend fun name(`value`: String?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.name = mapped
    }

    /**
     * @param value The ID of the project in which the `cluster` will exist. If it
     * is not provided, the provider project is used.
     */
    @JvmName("fenqgpacixwuugsp")
    public suspend fun project(`value`: String?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.project = mapped
    }

    /**
     * @param value The region in which the cluster and associated nodes will be created.
     * Defaults to `global`.
     */
    @JvmName("ewmdbtpfhumdqmia")
    public suspend fun region(`value`: String?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.region = mapped
    }

    /**
     * @param value Allows you to configure a virtual Dataproc on GKE cluster.
     * Structure defined below.
     */
    @JvmName("tqpricthligrkjnf")
    public suspend fun virtualClusterConfig(`value`: ClusterVirtualClusterConfigArgs?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.virtualClusterConfig = mapped
    }

    /**
     * @param argument Allows you to configure a virtual Dataproc on GKE cluster.
     * Structure defined below.
     */
    @JvmName("tgdsynpfmyelrojq")
    public suspend fun virtualClusterConfig(argument: suspend ClusterVirtualClusterConfigArgsBuilder.() -> Unit) {
        val toBeMapped = ClusterVirtualClusterConfigArgsBuilder().applySuspend { argument() }.build()
        val mapped = of(toBeMapped)
        this.virtualClusterConfig = mapped
    }

    internal fun build(): ClusterArgs = ClusterArgs(
        clusterConfig = clusterConfig,
        gracefulDecommissionTimeout = gracefulDecommissionTimeout,
        labels = labels,
        name = name,
        project = project,
        region = region,
        virtualClusterConfig = virtualClusterConfig,
    )
}



