com.pulumi.gcp.dataproc.kotlin.BatchArgs.kt

@file:Suppress("NAME_SHADOWING", "DEPRECATION")

package com.pulumi.gcp.dataproc.kotlin

import com.pulumi.core.Output
import com.pulumi.core.Output.of
import com.pulumi.gcp.dataproc.BatchArgs.builder
import com.pulumi.gcp.dataproc.kotlin.inputs.BatchEnvironmentConfigArgs
import com.pulumi.gcp.dataproc.kotlin.inputs.BatchEnvironmentConfigArgsBuilder
import com.pulumi.gcp.dataproc.kotlin.inputs.BatchPysparkBatchArgs
import com.pulumi.gcp.dataproc.kotlin.inputs.BatchPysparkBatchArgsBuilder
import com.pulumi.gcp.dataproc.kotlin.inputs.BatchRuntimeConfigArgs
import com.pulumi.gcp.dataproc.kotlin.inputs.BatchRuntimeConfigArgsBuilder
import com.pulumi.gcp.dataproc.kotlin.inputs.BatchSparkBatchArgs
import com.pulumi.gcp.dataproc.kotlin.inputs.BatchSparkBatchArgsBuilder
import com.pulumi.gcp.dataproc.kotlin.inputs.BatchSparkRBatchArgs
import com.pulumi.gcp.dataproc.kotlin.inputs.BatchSparkRBatchArgsBuilder
import com.pulumi.gcp.dataproc.kotlin.inputs.BatchSparkSqlBatchArgs
import com.pulumi.gcp.dataproc.kotlin.inputs.BatchSparkSqlBatchArgsBuilder
import com.pulumi.kotlin.ConvertibleToJava
import com.pulumi.kotlin.PulumiTagMarker
import com.pulumi.kotlin.applySuspend
import kotlin.Pair
import kotlin.String
import kotlin.Suppress
import kotlin.Unit
import kotlin.collections.Map
import kotlin.jvm.JvmName

/**
 * Dataproc Serverless Batches lets you run Spark workloads without requiring you to
 * provision and manage your own Dataproc cluster.
 * To get more information about Batch, see:
 * * [API documentation](https://cloud.google.com/dataproc-serverless/docs/reference/rest/v1/projects.locations.batches)
 * * How-to Guides
 *     * [Dataproc Serverless Batches Intro](https://cloud.google.com/dataproc-serverless/docs/overview)
 * ## Example Usage
 * ### Dataproc Batch Spark
 * 
 * ```typescript
 * import * as pulumi from "@pulumi/pulumi";
 * import * as gcp from "@pulumi/gcp";
 * const exampleBatchSpark = new gcp.dataproc.Batch("example_batch_spark", {
 *     batchId: "tf-test-batch_75125",
 *     location: "us-central1",
 *     labels: {
 *         batch_test: "terraform",
 *     },
 *     runtimeConfig: {
 *         properties: {
 *             "spark.dynamicAllocation.enabled": "false",
 *             "spark.executor.instances": "2",
 *         },
 *     },
 *     environmentConfig: {
 *         executionConfig: {
 *             subnetworkUri: "default",
 *             ttl: "3600s",
 *             networkTags: ["tag1"],
 *         },
 *     },
 *     sparkBatch: {
 *         mainClass: "org.apache.spark.examples.SparkPi",
 *         args: ["10"],
 *         jarFileUris: ["file:///usr/lib/spark/examples/jars/spark-examples.jar"],
 *     },
 * });
 * ```
 * ```python
 * import pulumi
 * import pulumi_gcp as gcp
 * example_batch_spark = gcp.dataproc.Batch("example_batch_spark",
 *     batch_id="tf-test-batch_75125",
 *     location="us-central1",
 *     labels={
 *         "batch_test": "terraform",
 *     },
 *     runtime_config={
 *         "properties": {
 *             "spark.dynamicAllocation.enabled": "false",
 *             "spark.executor.instances": "2",
 *         },
 *     },
 *     environment_config={
 *         "execution_config": {
 *             "subnetwork_uri": "default",
 *             "ttl": "3600s",
 *             "network_tags": ["tag1"],
 *         },
 *     },
 *     spark_batch={
 *         "main_class": "org.apache.spark.examples.SparkPi",
 *         "args": ["10"],
 *         "jar_file_uris": ["file:///usr/lib/spark/examples/jars/spark-examples.jar"],
 *     })
 * ```
 * ```csharp
 * using System.Collections.Generic;
 * using System.Linq;
 * using Pulumi;
 * using Gcp = Pulumi.Gcp;
 * return await Deployment.RunAsync(() =>
 * {
 *     var exampleBatchSpark = new Gcp.Dataproc.Batch("example_batch_spark", new()
 *     {
 *         BatchId = "tf-test-batch_75125",
 *         Location = "us-central1",
 *         Labels =
 *         {
 *             { "batch_test", "terraform" },
 *         },
 *         RuntimeConfig = new Gcp.Dataproc.Inputs.BatchRuntimeConfigArgs
 *         {
 *             Properties =
 *             {
 *                 { "spark.dynamicAllocation.enabled", "false" },
 *                 { "spark.executor.instances", "2" },
 *             },
 *         },
 *         EnvironmentConfig = new Gcp.Dataproc.Inputs.BatchEnvironmentConfigArgs
 *         {
 *             ExecutionConfig = new Gcp.Dataproc.Inputs.BatchEnvironmentConfigExecutionConfigArgs
 *             {
 *                 SubnetworkUri = "default",
 *                 Ttl = "3600s",
 *                 NetworkTags = new[]
 *                 {
 *                     "tag1",
 *                 },
 *             },
 *         },
 *         SparkBatch = new Gcp.Dataproc.Inputs.BatchSparkBatchArgs
 *         {
 *             MainClass = "org.apache.spark.examples.SparkPi",
 *             Args = new[]
 *             {
 *                 "10",
 *             },
 *             JarFileUris = new[]
 *             {
 *                 "file:///usr/lib/spark/examples/jars/spark-examples.jar",
 *             },
 *         },
 *     });
 * });
 * ```
 * ```go
 * package main
 * import (
 * 	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/dataproc"
 * 	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
 * )
 * func main() {
 * 	pulumi.Run(func(ctx *pulumi.Context) error {
 * 		_, err := dataproc.NewBatch(ctx, "example_batch_spark", &dataproc.BatchArgs{
 * 			BatchId:  pulumi.String("tf-test-batch_75125"),
 * 			Location: pulumi.String("us-central1"),
 * 			Labels: pulumi.StringMap{
 * 				"batch_test": pulumi.String("terraform"),
 * 			},
 * 			RuntimeConfig: &dataproc.BatchRuntimeConfigArgs{
 * 				Properties: pulumi.StringMap{
 * 					"spark.dynamicAllocation.enabled": pulumi.String("false"),
 * 					"spark.executor.instances":        pulumi.String("2"),
 * 				},
 * 			},
 * 			EnvironmentConfig: &dataproc.BatchEnvironmentConfigArgs{
 * 				ExecutionConfig: &dataproc.BatchEnvironmentConfigExecutionConfigArgs{
 * 					SubnetworkUri: pulumi.String("default"),
 * 					Ttl:           pulumi.String("3600s"),
 * 					NetworkTags: pulumi.StringArray{
 * 						pulumi.String("tag1"),
 * 					},
 * 				},
 * 			},
 * 			SparkBatch: &dataproc.BatchSparkBatchArgs{
 * 				MainClass: pulumi.String("org.apache.spark.examples.SparkPi"),
 * 				Args: pulumi.StringArray{
 * 					pulumi.String("10"),
 * 				},
 * 				JarFileUris: pulumi.StringArray{
 * 					pulumi.String("file:///usr/lib/spark/examples/jars/spark-examples.jar"),
 * 				},
 * 			},
 * 		})
 * 		if err != nil {
 * 			return err
 * 		}
 * 		return nil
 * 	})
 * }
 * ```
 * ```java
 * package generated_program;
 * import com.pulumi.Context;
 * import com.pulumi.Pulumi;
 * import com.pulumi.core.Output;
 * import com.pulumi.gcp.dataproc.Batch;
 * import com.pulumi.gcp.dataproc.BatchArgs;
 * import com.pulumi.gcp.dataproc.inputs.BatchRuntimeConfigArgs;
 * import com.pulumi.gcp.dataproc.inputs.BatchEnvironmentConfigArgs;
 * import com.pulumi.gcp.dataproc.inputs.BatchEnvironmentConfigExecutionConfigArgs;
 * import com.pulumi.gcp.dataproc.inputs.BatchSparkBatchArgs;
 * import java.util.List;
 * import java.util.ArrayList;
 * import java.util.Map;
 * import java.io.File;
 * import java.nio.file.Files;
 * import java.nio.file.Paths;
 * public class App {
 *     public static void main(String[] args) {
 *         Pulumi.run(App::stack);
 *     }
 *     public static void stack(Context ctx) {
 *         var exampleBatchSpark = new Batch("exampleBatchSpark", BatchArgs.builder()
 *             .batchId("tf-test-batch_75125")
 *             .location("us-central1")
 *             .labels(Map.of("batch_test", "terraform"))
 *             .runtimeConfig(BatchRuntimeConfigArgs.builder()
 *                 .properties(Map.ofEntries(
 *                     Map.entry("spark.dynamicAllocation.enabled", "false"),
 *                     Map.entry("spark.executor.instances", "2")
 *                 ))
 *                 .build())
 *             .environmentConfig(BatchEnvironmentConfigArgs.builder()
 *                 .executionConfig(BatchEnvironmentConfigExecutionConfigArgs.builder()
 *                     .subnetworkUri("default")
 *                     .ttl("3600s")
 *                     .networkTags("tag1")
 *                     .build())
 *                 .build())
 *             .sparkBatch(BatchSparkBatchArgs.builder()
 *                 .mainClass("org.apache.spark.examples.SparkPi")
 *                 .args("10")
 *                 .jarFileUris("file:///usr/lib/spark/examples/jars/spark-examples.jar")
 *                 .build())
 *             .build());
 *     }
 * }
 * ```
 * ```yaml
 * resources:
 *   exampleBatchSpark:
 *     type: gcp:dataproc:Batch
 *     name: example_batch_spark
 *     properties:
 *       batchId: tf-test-batch_75125
 *       location: us-central1
 *       labels:
 *         batch_test: terraform
 *       runtimeConfig:
 *         properties:
 *           spark.dynamicAllocation.enabled: 'false'
 *           spark.executor.instances: '2'
 *       environmentConfig:
 *         executionConfig:
 *           subnetworkUri: default
 *           ttl: 3600s
 *           networkTags:
 *             - tag1
 *       sparkBatch:
 *         mainClass: org.apache.spark.examples.SparkPi
 *         args:
 *           - '10'
 *         jarFileUris:
 *           - file:///usr/lib/spark/examples/jars/spark-examples.jar
 * ```
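 * A rough equivalent using this Kotlin SDK's type-safe builders is sketched below. The
 * `Pulumi.run` entry point and the top-level `batch(...)` resource function are assumptions
 * about the pulumi-kotlin program API; the nested blocks mirror the builders referenced by
 * this file (`BatchRuntimeConfigArgsBuilder`, `BatchEnvironmentConfigArgsBuilder`,
 * `BatchSparkBatchArgsBuilder`).
 * ```kotlin
 * import com.pulumi.kotlin.Pulumi
 *
 * fun main() {
 *     Pulumi.run {
 *         // Assumed resource-builder function for gcp.dataproc.Batch.
 *         batch("example_batch_spark") {
 *             args {
 *                 batchId("tf-test-batch_75125")
 *                 location("us-central1")
 *                 labels("batch_test" to "terraform")
 *                 runtimeConfig {
 *                     properties(
 *                         "spark.dynamicAllocation.enabled" to "false",
 *                         "spark.executor.instances" to "2",
 *                     )
 *                 }
 *                 environmentConfig {
 *                     executionConfig {
 *                         subnetworkUri("default")
 *                         ttl("3600s")
 *                         networkTags("tag1")
 *                     }
 *                 }
 *                 sparkBatch {
 *                     mainClass("org.apache.spark.examples.SparkPi")
 *                     args("10")
 *                     jarFileUris("file:///usr/lib/spark/examples/jars/spark-examples.jar")
 *                 }
 *             }
 *         }
 *     }
 * }
 * ```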
 * 
 * ### Dataproc Batch Spark Full
 * 
 * ```typescript
 * import * as pulumi from "@pulumi/pulumi";
 * import * as gcp from "@pulumi/gcp";
 * const project = gcp.organizations.getProject({});
 * const gcsAccount = gcp.storage.getProjectServiceAccount({});
 * const bucket = new gcp.storage.Bucket("bucket", {
 *     uniformBucketLevelAccess: true,
 *     name: "dataproc-bucket",
 *     location: "US",
 *     forceDestroy: true,
 * });
 * const keyRing = new gcp.kms.KeyRing("key_ring", {
 *     name: "example-keyring",
 *     location: "us-central1",
 * });
 * const cryptoKey = new gcp.kms.CryptoKey("crypto_key", {
 *     name: "example-key",
 *     keyRing: keyRing.id,
 *     purpose: "ENCRYPT_DECRYPT",
 * });
 * const cryptoKeyMember1 = new gcp.kms.CryptoKeyIAMMember("crypto_key_member_1", {
 *     cryptoKeyId: cryptoKey.id,
 *     role: "roles/cloudkms.cryptoKeyEncrypterDecrypter",
 *     member: project.then(project => `serviceAccount:service-${project.number}@dataproc-accounts.iam.gserviceaccount.com`),
 * });
 * const ms = new gcp.dataproc.MetastoreService("ms", {
 *     serviceId: "dataproc-batch",
 *     location: "us-central1",
 *     port: 9080,
 *     tier: "DEVELOPER",
 *     maintenanceWindow: {
 *         hourOfDay: 2,
 *         dayOfWeek: "SUNDAY",
 *     },
 *     hiveMetastoreConfig: {
 *         version: "3.1.2",
 *     },
 * });
 * const basic = new gcp.dataproc.Cluster("basic", {
 *     name: "dataproc-batch",
 *     region: "us-central1",
 *     clusterConfig: {
 *         softwareConfig: {
 *             overrideProperties: {
 *                 "dataproc:dataproc.allow.zero.workers": "true",
 *                 "spark:spark.history.fs.logDirectory": pulumi.interpolate`gs://${bucket.name}/*/spark-job-history`,
 *             },
 *         },
 *         endpointConfig: {
 *             enableHttpPortAccess: true,
 *         },
 *         masterConfig: {
 *             numInstances: 1,
 *             machineType: "e2-standard-2",
 *             diskConfig: {
 *                 bootDiskSizeGb: 35,
 *             },
 *         },
 *         metastoreConfig: {
 *             dataprocMetastoreService: ms.name,
 *         },
 *     },
 * });
 * const exampleBatchSpark = new gcp.dataproc.Batch("example_batch_spark", {
 *     batchId: "dataproc-batch",
 *     location: "us-central1",
 *     labels: {
 *         batch_test: "terraform",
 *     },
 *     runtimeConfig: {
 *         properties: {
 *             "spark.dynamicAllocation.enabled": "false",
 *             "spark.executor.instances": "2",
 *         },
 *         version: "2.2",
 *     },
 *     environmentConfig: {
 *         executionConfig: {
 *             ttl: "3600s",
 *             networkTags: ["tag1"],
 *             kmsKey: cryptoKey.id,
 *             networkUri: "default",
 *             serviceAccount: project.then(project => `${project.number}-compute@developer.gserviceaccount.com`),
 *             stagingBucket: bucket.name,
 *         },
 *         peripheralsConfig: {
 *             metastoreService: ms.name,
 *             sparkHistoryServerConfig: {
 *                 dataprocCluster: basic.id,
 *             },
 *         },
 *     },
 *     sparkBatch: {
 *         mainClass: "org.apache.spark.examples.SparkPi",
 *         args: ["10"],
 *         jarFileUris: ["file:///usr/lib/spark/examples/jars/spark-examples.jar"],
 *     },
 * }, {
 *     dependsOn: [cryptoKeyMember1],
 * });
 * ```
 * ```python
 * import pulumi
 * import pulumi_gcp as gcp
 * project = gcp.organizations.get_project()
 * gcs_account = gcp.storage.get_project_service_account()
 * bucket = gcp.storage.Bucket("bucket",
 *     uniform_bucket_level_access=True,
 *     name="dataproc-bucket",
 *     location="US",
 *     force_destroy=True)
 * key_ring = gcp.kms.KeyRing("key_ring",
 *     name="example-keyring",
 *     location="us-central1")
 * crypto_key = gcp.kms.CryptoKey("crypto_key",
 *     name="example-key",
 *     key_ring=key_ring.id,
 *     purpose="ENCRYPT_DECRYPT")
 * crypto_key_member1 = gcp.kms.CryptoKeyIAMMember("crypto_key_member_1",
 *     crypto_key_id=crypto_key.id,
 *     role="roles/cloudkms.cryptoKeyEncrypterDecrypter",
 *     member=f"serviceAccount:service-{project.number}@dataproc-accounts.iam.gserviceaccount.com")
 * ms = gcp.dataproc.MetastoreService("ms",
 *     service_id="dataproc-batch",
 *     location="us-central1",
 *     port=9080,
 *     tier="DEVELOPER",
 *     maintenance_window={
 *         "hour_of_day": 2,
 *         "day_of_week": "SUNDAY",
 *     },
 *     hive_metastore_config={
 *         "version": "3.1.2",
 *     })
 * basic = gcp.dataproc.Cluster("basic",
 *     name="dataproc-batch",
 *     region="us-central1",
 *     cluster_config={
 *         "software_config": {
 *             "override_properties": {
 *                 "dataproc:dataproc.allow.zero.workers": "true",
 *                 "spark:spark.history.fs.logDirectory": bucket.name.apply(lambda name: f"gs://{name}/*/spark-job-history"),
 *             },
 *         },
 *         "endpoint_config": {
 *             "enable_http_port_access": True,
 *         },
 *         "master_config": {
 *             "num_instances": 1,
 *             "machine_type": "e2-standard-2",
 *             "disk_config": {
 *                 "boot_disk_size_gb": 35,
 *             },
 *         },
 *         "metastore_config": {
 *             "dataproc_metastore_service": ms.name,
 *         },
 *     })
 * example_batch_spark = gcp.dataproc.Batch("example_batch_spark",
 *     batch_id="dataproc-batch",
 *     location="us-central1",
 *     labels={
 *         "batch_test": "terraform",
 *     },
 *     runtime_config={
 *         "properties": {
 *             "spark.dynamicAllocation.enabled": "false",
 *             "spark.executor.instances": "2",
 *         },
 *         "version": "2.2",
 *     },
 *     environment_config={
 *         "execution_config": {
 *             "ttl": "3600s",
 *             "network_tags": ["tag1"],
 *             "kms_key": crypto_key.id,
 *             "network_uri": "default",
 *             "service_account": f"{project.number}-compute@developer.gserviceaccount.com",
 *             "staging_bucket": bucket.name,
 *         },
 *         "peripherals_config": {
 *             "metastore_service": ms.name,
 *             "spark_history_server_config": {
 *                 "dataproc_cluster": basic.id,
 *             },
 *         },
 *     },
 *     spark_batch={
 *         "main_class": "org.apache.spark.examples.SparkPi",
 *         "args": ["10"],
 *         "jar_file_uris": ["file:///usr/lib/spark/examples/jars/spark-examples.jar"],
 *     },
 *     opts = pulumi.ResourceOptions(depends_on=[crypto_key_member1]))
 * ```
 * ```csharp
 * using System.Collections.Generic;
 * using System.Linq;
 * using Pulumi;
 * using Gcp = Pulumi.Gcp;
 * return await Deployment.RunAsync(() =>
 * {
 *     var project = Gcp.Organizations.GetProject.Invoke();
 *     var gcsAccount = Gcp.Storage.GetProjectServiceAccount.Invoke();
 *     var bucket = new Gcp.Storage.Bucket("bucket", new()
 *     {
 *         UniformBucketLevelAccess = true,
 *         Name = "dataproc-bucket",
 *         Location = "US",
 *         ForceDestroy = true,
 *     });
 *     var keyRing = new Gcp.Kms.KeyRing("key_ring", new()
 *     {
 *         Name = "example-keyring",
 *         Location = "us-central1",
 *     });
 *     var cryptoKey = new Gcp.Kms.CryptoKey("crypto_key", new()
 *     {
 *         Name = "example-key",
 *         KeyRing = keyRing.Id,
 *         Purpose = "ENCRYPT_DECRYPT",
 *     });
 *     var cryptoKeyMember1 = new Gcp.Kms.CryptoKeyIAMMember("crypto_key_member_1", new()
 *     {
 *         CryptoKeyId = cryptoKey.Id,
 *         Role = "roles/cloudkms.cryptoKeyEncrypterDecrypter",
 *         Member = $"serviceAccount:service-{project.Apply(getProjectResult => getProjectResult.Number)}@dataproc-accounts.iam.gserviceaccount.com",
 *     });
 *     var ms = new Gcp.Dataproc.MetastoreService("ms", new()
 *     {
 *         ServiceId = "dataproc-batch",
 *         Location = "us-central1",
 *         Port = 9080,
 *         Tier = "DEVELOPER",
 *         MaintenanceWindow = new Gcp.Dataproc.Inputs.MetastoreServiceMaintenanceWindowArgs
 *         {
 *             HourOfDay = 2,
 *             DayOfWeek = "SUNDAY",
 *         },
 *         HiveMetastoreConfig = new Gcp.Dataproc.Inputs.MetastoreServiceHiveMetastoreConfigArgs
 *         {
 *             Version = "3.1.2",
 *         },
 *     });
 *     var basic = new Gcp.Dataproc.Cluster("basic", new()
 *     {
 *         Name = "dataproc-batch",
 *         Region = "us-central1",
 *         ClusterConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigArgs
 *         {
 *             SoftwareConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigSoftwareConfigArgs
 *             {
 *                 OverrideProperties =
 *                 {
 *                     { "dataproc:dataproc.allow.zero.workers", "true" },
 *                     { "spark:spark.history.fs.logDirectory", bucket.Name.Apply(name => $"gs://{name}/*/spark-job-history") },
 *                 },
 *             },
 *             EndpointConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigEndpointConfigArgs
 *             {
 *                 EnableHttpPortAccess = true,
 *             },
 *             MasterConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigMasterConfigArgs
 *             {
 *                 NumInstances = 1,
 *                 MachineType = "e2-standard-2",
 *                 DiskConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigMasterConfigDiskConfigArgs
 *                 {
 *                     BootDiskSizeGb = 35,
 *                 },
 *             },
 *             MetastoreConfig = new Gcp.Dataproc.Inputs.ClusterClusterConfigMetastoreConfigArgs
 *             {
 *                 DataprocMetastoreService = ms.Name,
 *             },
 *         },
 *     });
 *     var exampleBatchSpark = new Gcp.Dataproc.Batch("example_batch_spark", new()
 *     {
 *         BatchId = "dataproc-batch",
 *         Location = "us-central1",
 *         Labels =
 *         {
 *             { "batch_test", "terraform" },
 *         },
 *         RuntimeConfig = new Gcp.Dataproc.Inputs.BatchRuntimeConfigArgs
 *         {
 *             Properties =
 *             {
 *                 { "spark.dynamicAllocation.enabled", "false" },
 *                 { "spark.executor.instances", "2" },
 *             },
 *             Version = "2.2",
 *         },
 *         EnvironmentConfig = new Gcp.Dataproc.Inputs.BatchEnvironmentConfigArgs
 *         {
 *             ExecutionConfig = new Gcp.Dataproc.Inputs.BatchEnvironmentConfigExecutionConfigArgs
 *             {
 *                 Ttl = "3600s",
 *                 NetworkTags = new[]
 *                 {
 *                     "tag1",
 *                 },
 *                 KmsKey = cryptoKey.Id,
 *                 NetworkUri = "default",
 *                 ServiceAccount = $"{project.Apply(getProjectResult => getProjectResult.Number)}-compute@developer.gserviceaccount.com",
 *                 StagingBucket = bucket.Name,
 *             },
 *             PeripheralsConfig = new Gcp.Dataproc.Inputs.BatchEnvironmentConfigPeripheralsConfigArgs
 *             {
 *                 MetastoreService = ms.Name,
 *                 SparkHistoryServerConfig = new Gcp.Dataproc.Inputs.BatchEnvironmentConfigPeripheralsConfigSparkHistoryServerConfigArgs
 *                 {
 *                     DataprocCluster = basic.Id,
 *                 },
 *             },
 *         },
 *         SparkBatch = new Gcp.Dataproc.Inputs.BatchSparkBatchArgs
 *         {
 *             MainClass = "org.apache.spark.examples.SparkPi",
 *             Args = new[]
 *             {
 *                 "10",
 *             },
 *             JarFileUris = new[]
 *             {
 *                 "file:///usr/lib/spark/examples/jars/spark-examples.jar",
 *             },
 *         },
 *     }, new CustomResourceOptions
 *     {
 *         DependsOn =
 *         {
 *             cryptoKeyMember1,
 *         },
 *     });
 * });
 * ```
 * ```go
 * package main
 * import (
 * 	"fmt"
 * 	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/dataproc"
 * 	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/kms"
 * 	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/organizations"
 * 	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/storage"
 * 	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
 * )
 * func main() {
 * 	pulumi.Run(func(ctx *pulumi.Context) error {
 * 		project, err := organizations.LookupProject(ctx, &organizations.LookupProjectArgs{}, nil)
 * 		if err != nil {
 * 			return err
 * 		}
 * 		_, err = storage.GetProjectServiceAccount(ctx, &storage.GetProjectServiceAccountArgs{}, nil)
 * 		if err != nil {
 * 			return err
 * 		}
 * 		bucket, err := storage.NewBucket(ctx, "bucket", &storage.BucketArgs{
 * 			UniformBucketLevelAccess: pulumi.Bool(true),
 * 			Name:                     pulumi.String("dataproc-bucket"),
 * 			Location:                 pulumi.String("US"),
 * 			ForceDestroy:             pulumi.Bool(true),
 * 		})
 * 		if err != nil {
 * 			return err
 * 		}
 * 		keyRing, err := kms.NewKeyRing(ctx, "key_ring", &kms.KeyRingArgs{
 * 			Name:     pulumi.String("example-keyring"),
 * 			Location: pulumi.String("us-central1"),
 * 		})
 * 		if err != nil {
 * 			return err
 * 		}
 * 		cryptoKey, err := kms.NewCryptoKey(ctx, "crypto_key", &kms.CryptoKeyArgs{
 * 			Name:    pulumi.String("example-key"),
 * 			KeyRing: keyRing.ID(),
 * 			Purpose: pulumi.String("ENCRYPT_DECRYPT"),
 * 		})
 * 		if err != nil {
 * 			return err
 * 		}
 * 		cryptoKeyMember1, err := kms.NewCryptoKeyIAMMember(ctx, "crypto_key_member_1", &kms.CryptoKeyIAMMemberArgs{
 * 			CryptoKeyId: cryptoKey.ID(),
 * 			Role:        pulumi.String("roles/cloudkms.cryptoKeyEncrypterDecrypter"),
 * 			Member:      pulumi.Sprintf("serviceAccount:service-%v@dataproc-accounts.iam.gserviceaccount.com", project.Number),
 * 		})
 * 		if err != nil {
 * 			return err
 * 		}
 * 		ms, err := dataproc.NewMetastoreService(ctx, "ms", &dataproc.MetastoreServiceArgs{
 * 			ServiceId: pulumi.String("dataproc-batch"),
 * 			Location:  pulumi.String("us-central1"),
 * 			Port:      pulumi.Int(9080),
 * 			Tier:      pulumi.String("DEVELOPER"),
 * 			MaintenanceWindow: &dataproc.MetastoreServiceMaintenanceWindowArgs{
 * 				HourOfDay: pulumi.Int(2),
 * 				DayOfWeek: pulumi.String("SUNDAY"),
 * 			},
 * 			HiveMetastoreConfig: &dataproc.MetastoreServiceHiveMetastoreConfigArgs{
 * 				Version: pulumi.String("3.1.2"),
 * 			},
 * 		})
 * 		if err != nil {
 * 			return err
 * 		}
 * 		basic, err := dataproc.NewCluster(ctx, "basic", &dataproc.ClusterArgs{
 * 			Name:   pulumi.String("dataproc-batch"),
 * 			Region: pulumi.String("us-central1"),
 * 			ClusterConfig: &dataproc.ClusterClusterConfigArgs{
 * 				SoftwareConfig: &dataproc.ClusterClusterConfigSoftwareConfigArgs{
 * 					OverrideProperties: pulumi.StringMap{
 * 						"dataproc:dataproc.allow.zero.workers": pulumi.String("true"),
 * 						"spark:spark.history.fs.logDirectory": bucket.Name.ApplyT(func(name string) (string, error) {
 * 							return fmt.Sprintf("gs://%v/*/spark-job-history", name), nil
 * 						}).(pulumi.StringOutput),
 * 					},
 * 				},
 * 				EndpointConfig: &dataproc.ClusterClusterConfigEndpointConfigArgs{
 * 					EnableHttpPortAccess: pulumi.Bool(true),
 * 				},
 * 				MasterConfig: &dataproc.ClusterClusterConfigMasterConfigArgs{
 * 					NumInstances: pulumi.Int(1),
 * 					MachineType:  pulumi.String("e2-standard-2"),
 * 					DiskConfig: &dataproc.ClusterClusterConfigMasterConfigDiskConfigArgs{
 * 						BootDiskSizeGb: pulumi.Int(35),
 * 					},
 * 				},
 * 				MetastoreConfig: &dataproc.ClusterClusterConfigMetastoreConfigArgs{
 * 					DataprocMetastoreService: ms.Name,
 * 				},
 * 			},
 * 		})
 * 		if err != nil {
 * 			return err
 * 		}
 * 		_, err = dataproc.NewBatch(ctx, "example_batch_spark", &dataproc.BatchArgs{
 * 			BatchId:  pulumi.String("dataproc-batch"),
 * 			Location: pulumi.String("us-central1"),
 * 			Labels: pulumi.StringMap{
 * 				"batch_test": pulumi.String("terraform"),
 * 			},
 * 			RuntimeConfig: &dataproc.BatchRuntimeConfigArgs{
 * 				Properties: pulumi.StringMap{
 * 					"spark.dynamicAllocation.enabled": pulumi.String("false"),
 * 					"spark.executor.instances":        pulumi.String("2"),
 * 				},
 * 				Version: pulumi.String("2.2"),
 * 			},
 * 			EnvironmentConfig: &dataproc.BatchEnvironmentConfigArgs{
 * 				ExecutionConfig: &dataproc.BatchEnvironmentConfigExecutionConfigArgs{
 * 					Ttl: pulumi.String("3600s"),
 * 					NetworkTags: pulumi.StringArray{
 * 						pulumi.String("tag1"),
 * 					},
 * 					KmsKey:         cryptoKey.ID(),
 * 					NetworkUri:     pulumi.String("default"),
 * 					ServiceAccount: pulumi.Sprintf("%v-compute@developer.gserviceaccount.com", project.Number),
 * 					StagingBucket:  bucket.Name,
 * 				},
 * 				PeripheralsConfig: &dataproc.BatchEnvironmentConfigPeripheralsConfigArgs{
 * 					MetastoreService: ms.Name,
 * 					SparkHistoryServerConfig: &dataproc.BatchEnvironmentConfigPeripheralsConfigSparkHistoryServerConfigArgs{
 * 						DataprocCluster: basic.ID(),
 * 					},
 * 				},
 * 			},
 * 			SparkBatch: &dataproc.BatchSparkBatchArgs{
 * 				MainClass: pulumi.String("org.apache.spark.examples.SparkPi"),
 * 				Args: pulumi.StringArray{
 * 					pulumi.String("10"),
 * 				},
 * 				JarFileUris: pulumi.StringArray{
 * 					pulumi.String("file:///usr/lib/spark/examples/jars/spark-examples.jar"),
 * 				},
 * 			},
 * 		}, pulumi.DependsOn([]pulumi.Resource{
 * 			cryptoKeyMember1,
 * 		}))
 * 		if err != nil {
 * 			return err
 * 		}
 * 		return nil
 * 	})
 * }
 * ```
 * ```java
 * package generated_program;
 * import com.pulumi.Context;
 * import com.pulumi.Pulumi;
 * import com.pulumi.core.Output;
 * import com.pulumi.gcp.organizations.OrganizationsFunctions;
 * import com.pulumi.gcp.organizations.inputs.GetProjectArgs;
 * import com.pulumi.gcp.storage.StorageFunctions;
 * import com.pulumi.gcp.storage.inputs.GetProjectServiceAccountArgs;
 * import com.pulumi.gcp.storage.Bucket;
 * import com.pulumi.gcp.storage.BucketArgs;
 * import com.pulumi.gcp.kms.KeyRing;
 * import com.pulumi.gcp.kms.KeyRingArgs;
 * import com.pulumi.gcp.kms.CryptoKey;
 * import com.pulumi.gcp.kms.CryptoKeyArgs;
 * import com.pulumi.gcp.kms.CryptoKeyIAMMember;
 * import com.pulumi.gcp.kms.CryptoKeyIAMMemberArgs;
 * import com.pulumi.gcp.dataproc.MetastoreService;
 * import com.pulumi.gcp.dataproc.MetastoreServiceArgs;
 * import com.pulumi.gcp.dataproc.inputs.MetastoreServiceMaintenanceWindowArgs;
 * import com.pulumi.gcp.dataproc.inputs.MetastoreServiceHiveMetastoreConfigArgs;
 * import com.pulumi.gcp.dataproc.Cluster;
 * import com.pulumi.gcp.dataproc.ClusterArgs;
 * import com.pulumi.gcp.dataproc.inputs.ClusterClusterConfigArgs;
 * import com.pulumi.gcp.dataproc.inputs.ClusterClusterConfigSoftwareConfigArgs;
 * import com.pulumi.gcp.dataproc.inputs.ClusterClusterConfigEndpointConfigArgs;
 * import com.pulumi.gcp.dataproc.inputs.ClusterClusterConfigMasterConfigArgs;
 * import com.pulumi.gcp.dataproc.inputs.ClusterClusterConfigMasterConfigDiskConfigArgs;
 * import com.pulumi.gcp.dataproc.inputs.ClusterClusterConfigMetastoreConfigArgs;
 * import com.pulumi.gcp.dataproc.Batch;
 * import com.pulumi.gcp.dataproc.BatchArgs;
 * import com.pulumi.gcp.dataproc.inputs.BatchRuntimeConfigArgs;
 * import com.pulumi.gcp.dataproc.inputs.BatchEnvironmentConfigArgs;
 * import com.pulumi.gcp.dataproc.inputs.BatchEnvironmentConfigExecutionConfigArgs;
 * import com.pulumi.gcp.dataproc.inputs.BatchEnvironmentConfigPeripheralsConfigArgs;
 * import com.pulumi.gcp.dataproc.inputs.BatchEnvironmentConfigPeripheralsConfigSparkHistoryServerConfigArgs;
 * import com.pulumi.gcp.dataproc.inputs.BatchSparkBatchArgs;
 * import com.pulumi.resources.CustomResourceOptions;
 * import java.util.List;
 * import java.util.ArrayList;
 * import java.util.Map;
 * import java.io.File;
 * import java.nio.file.Files;
 * import java.nio.file.Paths;
 * public class App {
 *     public static void main(String[] args) {
 *         Pulumi.run(App::stack);
 *     }
 *     public static void stack(Context ctx) {
 *         final var project = OrganizationsFunctions.getProject();
 *         final var gcsAccount = StorageFunctions.getProjectServiceAccount();
 *         var bucket = new Bucket("bucket", BucketArgs.builder()
 *             .uniformBucketLevelAccess(true)
 *             .name("dataproc-bucket")
 *             .location("US")
 *             .forceDestroy(true)
 *             .build());
 *         var keyRing = new KeyRing("keyRing", KeyRingArgs.builder()
 *             .name("example-keyring")
 *             .location("us-central1")
 *             .build());
 *         var cryptoKey = new CryptoKey("cryptoKey", CryptoKeyArgs.builder()
 *             .name("example-key")
 *             .keyRing(keyRing.id())
 *             .purpose("ENCRYPT_DECRYPT")
 *             .build());
 *         var cryptoKeyMember1 = new CryptoKeyIAMMember("cryptoKeyMember1", CryptoKeyIAMMemberArgs.builder()
 *             .cryptoKeyId(cryptoKey.id())
 *             .role("roles/cloudkms.cryptoKeyEncrypterDecrypter")
 *             .member(String.format("serviceAccount:service-%s@dataproc-accounts.iam.gserviceaccount.com", project.applyValue(getProjectResult -> getProjectResult.number())))
 *             .build());
 *         var ms = new MetastoreService("ms", MetastoreServiceArgs.builder()
 *             .serviceId("dataproc-batch")
 *             .location("us-central1")
 *             .port(9080)
 *             .tier("DEVELOPER")
 *             .maintenanceWindow(MetastoreServiceMaintenanceWindowArgs.builder()
 *                 .hourOfDay(2)
 *                 .dayOfWeek("SUNDAY")
 *                 .build())
 *             .hiveMetastoreConfig(MetastoreServiceHiveMetastoreConfigArgs.builder()
 *                 .version("3.1.2")
 *                 .build())
 *             .build());
 *         var basic = new Cluster("basic", ClusterArgs.builder()
 *             .name("dataproc-batch")
 *             .region("us-central1")
 *             .clusterConfig(ClusterClusterConfigArgs.builder()
 *                 .softwareConfig(ClusterClusterConfigSoftwareConfigArgs.builder()
 *                     .overrideProperties(Map.ofEntries(
 *                         Map.entry("dataproc:dataproc.allow.zero.workers", "true"),
 *                         Map.entry("spark:spark.history.fs.logDirectory", bucket.name().applyValue(name -> String.format("gs://%s/*/spark-job-history", name)))
 *                     ))
 *                     .build())
 *                 .endpointConfig(ClusterClusterConfigEndpointConfigArgs.builder()
 *                     .enableHttpPortAccess(true)
 *                     .build())
 *                 .masterConfig(ClusterClusterConfigMasterConfigArgs.builder()
 *                     .numInstances(1)
 *                     .machineType("e2-standard-2")
 *                     .diskConfig(ClusterClusterConfigMasterConfigDiskConfigArgs.builder()
 *                         .bootDiskSizeGb(35)
 *                         .build())
 *                     .build())
 *                 .metastoreConfig(ClusterClusterConfigMetastoreConfigArgs.builder()
 *                     .dataprocMetastoreService(ms.name())
 *                     .build())
 *                 .build())
 *             .build());
 *         var exampleBatchSpark = new Batch("exampleBatchSpark", BatchArgs.builder()
 *             .batchId("dataproc-batch")
 *             .location("us-central1")
 *             .labels(Map.of("batch_test", "terraform"))
 *             .runtimeConfig(BatchRuntimeConfigArgs.builder()
 *                 .properties(Map.ofEntries(
 *                     Map.entry("spark.dynamicAllocation.enabled", "false"),
 *                     Map.entry("spark.executor.instances", "2")
 *                 ))
 *                 .version("2.2")
 *                 .build())
 *             .environmentConfig(BatchEnvironmentConfigArgs.builder()
 *                 .executionConfig(BatchEnvironmentConfigExecutionConfigArgs.builder()
 *                     .ttl("3600s")
 *                     .networkTags("tag1")
 *                     .kmsKey(cryptoKey.id())
 *                     .networkUri("default")
 *                     .serviceAccount(String.format("%s-compute@developer.gserviceaccount.com", project.applyValue(getProjectResult -> getProjectResult.number())))
 *                     .stagingBucket(bucket.name())
 *                     .build())
 *                 .peripheralsConfig(BatchEnvironmentConfigPeripheralsConfigArgs.builder()
 *                     .metastoreService(ms.name())
 *                     .sparkHistoryServerConfig(BatchEnvironmentConfigPeripheralsConfigSparkHistoryServerConfigArgs.builder()
 *                         .dataprocCluster(basic.id())
 *                         .build())
 *                     .build())
 *                 .build())
 *             .sparkBatch(BatchSparkBatchArgs.builder()
 *                 .mainClass("org.apache.spark.examples.SparkPi")
 *                 .args("10")
 *                 .jarFileUris("file:///usr/lib/spark/examples/jars/spark-examples.jar")
 *                 .build())
 *             .build(), CustomResourceOptions.builder()
 *                 .dependsOn(cryptoKeyMember1)
 *                 .build());
 *     }
 * }
 * ```
 * ```yaml
 * resources:
 *   exampleBatchSpark:
 *     type: gcp:dataproc:Batch
 *     name: example_batch_spark
 *     properties:
 *       batchId: dataproc-batch
 *       location: us-central1
 *       labels:
 *         batch_test: terraform
 *       runtimeConfig:
 *         properties:
 *           spark.dynamicAllocation.enabled: 'false'
 *           spark.executor.instances: '2'
 *         version: '2.2'
 *       environmentConfig:
 *         executionConfig:
 *           ttl: 3600s
 *           networkTags:
 *             - tag1
 *           kmsKey: ${cryptoKey.id}
 *           networkUri: default
 *           serviceAccount: ${project.number}-compute@developer.gserviceaccount.com
 *           stagingBucket: ${bucket.name}
 *         peripheralsConfig:
 *           metastoreService: ${ms.name}
 *           sparkHistoryServerConfig:
 *             dataprocCluster: ${basic.id}
 *       sparkBatch:
 *         mainClass: org.apache.spark.examples.SparkPi
 *         args:
 *           - '10'
 *         jarFileUris:
 *           - file:///usr/lib/spark/examples/jars/spark-examples.jar
 *     options:
 *       dependsOn:
 *         - ${cryptoKeyMember1}
 *   bucket:
 *     type: gcp:storage:Bucket
 *     properties:
 *       uniformBucketLevelAccess: true
 *       name: dataproc-bucket
 *       location: US
 *       forceDestroy: true
 *   cryptoKey:
 *     type: gcp:kms:CryptoKey
 *     name: crypto_key
 *     properties:
 *       name: example-key
 *       keyRing: ${keyRing.id}
 *       purpose: ENCRYPT_DECRYPT
 *   keyRing:
 *     type: gcp:kms:KeyRing
 *     name: key_ring
 *     properties:
 *       name: example-keyring
 *       location: us-central1
 *   cryptoKeyMember1:
 *     type: gcp:kms:CryptoKeyIAMMember
 *     name: crypto_key_member_1
 *     properties:
 *       cryptoKeyId: ${cryptoKey.id}
 *       role: roles/cloudkms.cryptoKeyEncrypterDecrypter
 *       member: serviceAccount:service-${project.number}@dataproc-accounts.iam.gserviceaccount.com
 *   basic:
 *     type: gcp:dataproc:Cluster
 *     properties:
 *       name: dataproc-batch
 *       region: us-central1
 *       clusterConfig:
 *         softwareConfig:
 *           overrideProperties:
 *             dataproc:dataproc.allow.zero.workers: 'true'
 *             spark:spark.history.fs.logDirectory: gs://${bucket.name}/*/spark-job-history
 *         endpointConfig:
 *           enableHttpPortAccess: true
 *         masterConfig:
 *           numInstances: 1
 *           machineType: e2-standard-2
 *           diskConfig:
 *             bootDiskSizeGb: 35
 *         metastoreConfig:
 *           dataprocMetastoreService: ${ms.name}
 *   ms:
 *     type: gcp:dataproc:MetastoreService
 *     properties:
 *       serviceId: dataproc-batch
 *       location: us-central1
 *       port: 9080
 *       tier: DEVELOPER
 *       maintenanceWindow:
 *         hourOfDay: 2
 *         dayOfWeek: SUNDAY
 *       hiveMetastoreConfig:
 *         version: 3.1.2
 * variables:
 *   project:
 *     fn::invoke:
 *       function: gcp:organizations:getProject
 *       arguments: {}
 *   gcsAccount:
 *     fn::invoke:
 *       function: gcp:storage:getProjectServiceAccount
 *       arguments: {}
 * ```
 * 
 * ### Dataproc Batch Sparksql
 * 
 * ```typescript
 * import * as pulumi from "@pulumi/pulumi";
 * import * as gcp from "@pulumi/gcp";
 * const exampleBatchSparsql = new gcp.dataproc.Batch("example_batch_sparsql", {
 *     batchId: "tf-test-batch_88722",
 *     location: "us-central1",
 *     runtimeConfig: {
 *         properties: {
 *             "spark.dynamicAllocation.enabled": "false",
 *             "spark.executor.instances": "2",
 *         },
 *     },
 *     environmentConfig: {
 *         executionConfig: {
 *             subnetworkUri: "default",
 *         },
 *     },
 *     sparkSqlBatch: {
 *         queryFileUri: "gs://dataproc-examples/spark-sql/natality/cigarette_correlations.sql",
 *         jarFileUris: ["file:///usr/lib/spark/examples/jars/spark-examples.jar"],
 *         queryVariables: {
 *             name: "value",
 *         },
 *     },
 * });
 * ```
 * ```python
 * import pulumi
 * import pulumi_gcp as gcp
 * example_batch_sparsql = gcp.dataproc.Batch("example_batch_sparsql",
 *     batch_id="tf-test-batch_88722",
 *     location="us-central1",
 *     runtime_config={
 *         "properties": {
 *             "spark.dynamicAllocation.enabled": "false",
 *             "spark.executor.instances": "2",
 *         },
 *     },
 *     environment_config={
 *         "execution_config": {
 *             "subnetwork_uri": "default",
 *         },
 *     },
 *     spark_sql_batch={
 *         "query_file_uri": "gs://dataproc-examples/spark-sql/natality/cigarette_correlations.sql",
 *         "jar_file_uris": ["file:///usr/lib/spark/examples/jars/spark-examples.jar"],
 *         "query_variables": {
 *             "name": "value",
 *         },
 *     })
 * ```
 * ```csharp
 * using System.Collections.Generic;
 * using System.Linq;
 * using Pulumi;
 * using Gcp = Pulumi.Gcp;
 * return await Deployment.RunAsync(() =>
 * {
 *     var exampleBatchSparsql = new Gcp.Dataproc.Batch("example_batch_sparsql", new()
 *     {
 *         BatchId = "tf-test-batch_88722",
 *         Location = "us-central1",
 *         RuntimeConfig = new Gcp.Dataproc.Inputs.BatchRuntimeConfigArgs
 *         {
 *             Properties =
 *             {
 *                 { "spark.dynamicAllocation.enabled", "false" },
 *                 { "spark.executor.instances", "2" },
 *             },
 *         },
 *         EnvironmentConfig = new Gcp.Dataproc.Inputs.BatchEnvironmentConfigArgs
 *         {
 *             ExecutionConfig = new Gcp.Dataproc.Inputs.BatchEnvironmentConfigExecutionConfigArgs
 *             {
 *                 SubnetworkUri = "default",
 *             },
 *         },
 *         SparkSqlBatch = new Gcp.Dataproc.Inputs.BatchSparkSqlBatchArgs
 *         {
 *             QueryFileUri = "gs://dataproc-examples/spark-sql/natality/cigarette_correlations.sql",
 *             JarFileUris = new[]
 *             {
 *                 "file:///usr/lib/spark/examples/jars/spark-examples.jar",
 *             },
 *             QueryVariables =
 *             {
 *                 { "name", "value" },
 *             },
 *         },
 *     });
 * });
 * ```
 * ```go
 * package main
 * import (
 * 	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/dataproc"
 * 	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
 * )
 * func main() {
 * 	pulumi.Run(func(ctx *pulumi.Context) error {
 * 		_, err := dataproc.NewBatch(ctx, "example_batch_sparsql", &dataproc.BatchArgs{
 * 			BatchId:  pulumi.String("tf-test-batch_88722"),
 * 			Location: pulumi.String("us-central1"),
 * 			RuntimeConfig: &dataproc.BatchRuntimeConfigArgs{
 * 				Properties: pulumi.StringMap{
 * 					"spark.dynamicAllocation.enabled": pulumi.String("false"),
 * 					"spark.executor.instances":        pulumi.String("2"),
 * 				},
 * 			},
 * 			EnvironmentConfig: &dataproc.BatchEnvironmentConfigArgs{
 * 				ExecutionConfig: &dataproc.BatchEnvironmentConfigExecutionConfigArgs{
 * 					SubnetworkUri: pulumi.String("default"),
 * 				},
 * 			},
 * 			SparkSqlBatch: &dataproc.BatchSparkSqlBatchArgs{
 * 				QueryFileUri: pulumi.String("gs://dataproc-examples/spark-sql/natality/cigarette_correlations.sql"),
 * 				JarFileUris: pulumi.StringArray{
 * 					pulumi.String("file:///usr/lib/spark/examples/jars/spark-examples.jar"),
 * 				},
 * 				QueryVariables: pulumi.StringMap{
 * 					"name": pulumi.String("value"),
 * 				},
 * 			},
 * 		})
 * 		if err != nil {
 * 			return err
 * 		}
 * 		return nil
 * 	})
 * }
 * ```
 * ```java
 * package generated_program;
 * import com.pulumi.Context;
 * import com.pulumi.Pulumi;
 * import com.pulumi.core.Output;
 * import com.pulumi.gcp.dataproc.Batch;
 * import com.pulumi.gcp.dataproc.BatchArgs;
 * import com.pulumi.gcp.dataproc.inputs.BatchRuntimeConfigArgs;
 * import com.pulumi.gcp.dataproc.inputs.BatchEnvironmentConfigArgs;
 * import com.pulumi.gcp.dataproc.inputs.BatchEnvironmentConfigExecutionConfigArgs;
 * import com.pulumi.gcp.dataproc.inputs.BatchSparkSqlBatchArgs;
 * import java.util.List;
 * import java.util.ArrayList;
 * import java.util.Map;
 * import java.io.File;
 * import java.nio.file.Files;
 * import java.nio.file.Paths;
 * public class App {
 *     public static void main(String[] args) {
 *         Pulumi.run(App::stack);
 *     }
 *     public static void stack(Context ctx) {
 *         var exampleBatchSparsql = new Batch("exampleBatchSparsql", BatchArgs.builder()
 *             .batchId("tf-test-batch_88722")
 *             .location("us-central1")
 *             .runtimeConfig(BatchRuntimeConfigArgs.builder()
 *                 .properties(Map.ofEntries(
 *                     Map.entry("spark.dynamicAllocation.enabled", "false"),
 *                     Map.entry("spark.executor.instances", "2")
 *                 ))
 *                 .build())
 *             .environmentConfig(BatchEnvironmentConfigArgs.builder()
 *                 .executionConfig(BatchEnvironmentConfigExecutionConfigArgs.builder()
 *                     .subnetworkUri("default")
 *                     .build())
 *                 .build())
 *             .sparkSqlBatch(BatchSparkSqlBatchArgs.builder()
 *                 .queryFileUri("gs://dataproc-examples/spark-sql/natality/cigarette_correlations.sql")
 *                 .jarFileUris("file:///usr/lib/spark/examples/jars/spark-examples.jar")
 *                 .queryVariables(Map.of("name", "value"))
 *                 .build())
 *             .build());
 *     }
 * }
 * ```
 * ```yaml
 * resources:
 *   exampleBatchSparsql:
 *     type: gcp:dataproc:Batch
 *     name: example_batch_sparsql
 *     properties:
 *       batchId: tf-test-batch_88722
 *       location: us-central1
 *       runtimeConfig:
 *         properties:
 *           spark.dynamicAllocation.enabled: 'false'
 *           spark.executor.instances: '2'
 *       environmentConfig:
 *         executionConfig:
 *           subnetworkUri: default
 *       sparkSqlBatch:
 *         queryFileUri: gs://dataproc-examples/spark-sql/natality/cigarette_correlations.sql
 *         jarFileUris:
 *           - file:///usr/lib/spark/examples/jars/spark-examples.jar
 *         queryVariables:
 *           name: value
 * ```
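 * Under the same assumptions as the Kotlin sketch above (entry point and `batch(...)` resource
 * function assumed), a Spark SQL batch might be declared like this:
 * ```kotlin
 * import com.pulumi.kotlin.Pulumi
 *
 * fun main() {
 *     Pulumi.run {
 *         batch("example_batch_sparsql") {
 *             args {
 *                 batchId("tf-test-batch_88722")
 *                 location("us-central1")
 *                 // Runtime and environment config omitted for brevity; see the Spark sketch above.
 *                 sparkSqlBatch {
 *                     queryFileUri("gs://dataproc-examples/spark-sql/natality/cigarette_correlations.sql")
 *                     jarFileUris("file:///usr/lib/spark/examples/jars/spark-examples.jar")
 *                     queryVariables("name" to "value")
 *                 }
 *             }
 *         }
 *     }
 * }
 * ```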
 * 
 * ### Dataproc Batch Pyspark
 * 
 * ```typescript
 * import * as pulumi from "@pulumi/pulumi";
 * import * as gcp from "@pulumi/gcp";
 * const exampleBatchPyspark = new gcp.dataproc.Batch("example_batch_pyspark", {
 *     batchId: "tf-test-batch_39249",
 *     location: "us-central1",
 *     runtimeConfig: {
 *         properties: {
 *             "spark.dynamicAllocation.enabled": "false",
 *             "spark.executor.instances": "2",
 *         },
 *     },
 *     environmentConfig: {
 *         executionConfig: {
 *             subnetworkUri: "default",
 *         },
 *     },
 *     pysparkBatch: {
 *         mainPythonFileUri: "https://storage.googleapis.com/terraform-batches/test_util.py",
 *         args: ["10"],
 *         jarFileUris: ["file:///usr/lib/spark/examples/jars/spark-examples.jar"],
 *         pythonFileUris: ["gs://dataproc-examples/pyspark/hello-world/hello-world.py"],
 *         archiveUris: [
 *             "https://storage.googleapis.com/terraform-batches/animals.txt.tar.gz#unpacked",
 *             "https://storage.googleapis.com/terraform-batches/animals.txt.jar",
 *             "https://storage.googleapis.com/terraform-batches/animals.txt",
 *         ],
 *         fileUris: ["https://storage.googleapis.com/terraform-batches/people.txt"],
 *     },
 * });
 * ```
 * ```python
 * import pulumi
 * import pulumi_gcp as gcp
 * example_batch_pyspark = gcp.dataproc.Batch("example_batch_pyspark",
 *     batch_id="tf-test-batch_39249",
 *     location="us-central1",
 *     runtime_config={
 *         "properties": {
 *             "spark.dynamicAllocation.enabled": "false",
 *             "spark.executor.instances": "2",
 *         },
 *     },
 *     environment_config={
 *         "execution_config": {
 *             "subnetwork_uri": "default",
 *         },
 *     },
 *     pyspark_batch={
 *         "main_python_file_uri": "https://storage.googleapis.com/terraform-batches/test_util.py",
 *         "args": ["10"],
 *         "jar_file_uris": ["file:///usr/lib/spark/examples/jars/spark-examples.jar"],
 *         "python_file_uris": ["gs://dataproc-examples/pyspark/hello-world/hello-world.py"],
 *         "archive_uris": [
 *             "https://storage.googleapis.com/terraform-batches/animals.txt.tar.gz#unpacked",
 *             "https://storage.googleapis.com/terraform-batches/animals.txt.jar",
 *             "https://storage.googleapis.com/terraform-batches/animals.txt",
 *         ],
 *         "file_uris": ["https://storage.googleapis.com/terraform-batches/people.txt"],
 *     })
 * ```
 * ```csharp
 * using System.Collections.Generic;
 * using System.Linq;
 * using Pulumi;
 * using Gcp = Pulumi.Gcp;
 * return await Deployment.RunAsync(() =>
 * {
 *     var exampleBatchPyspark = new Gcp.Dataproc.Batch("example_batch_pyspark", new()
 *     {
 *         BatchId = "tf-test-batch_39249",
 *         Location = "us-central1",
 *         RuntimeConfig = new Gcp.Dataproc.Inputs.BatchRuntimeConfigArgs
 *         {
 *             Properties =
 *             {
 *                 { "spark.dynamicAllocation.enabled", "false" },
 *                 { "spark.executor.instances", "2" },
 *             },
 *         },
 *         EnvironmentConfig = new Gcp.Dataproc.Inputs.BatchEnvironmentConfigArgs
 *         {
 *             ExecutionConfig = new Gcp.Dataproc.Inputs.BatchEnvironmentConfigExecutionConfigArgs
 *             {
 *                 SubnetworkUri = "default",
 *             },
 *         },
 *         PysparkBatch = new Gcp.Dataproc.Inputs.BatchPysparkBatchArgs
 *         {
 *             MainPythonFileUri = "https://storage.googleapis.com/terraform-batches/test_util.py",
 *             Args = new[]
 *             {
 *                 "10",
 *             },
 *             JarFileUris = new[]
 *             {
 *                 "file:///usr/lib/spark/examples/jars/spark-examples.jar",
 *             },
 *             PythonFileUris = new[]
 *             {
 *                 "gs://dataproc-examples/pyspark/hello-world/hello-world.py",
 *             },
 *             ArchiveUris = new[]
 *             {
 *                 "https://storage.googleapis.com/terraform-batches/animals.txt.tar.gz#unpacked",
 *                 "https://storage.googleapis.com/terraform-batches/animals.txt.jar",
 *                 "https://storage.googleapis.com/terraform-batches/animals.txt",
 *             },
 *             FileUris = new[]
 *             {
 *                 "https://storage.googleapis.com/terraform-batches/people.txt",
 *             },
 *         },
 *     });
 * });
 * ```
 * ```go
 * package main
 * import (
 * 	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/dataproc"
 * 	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
 * )
 * func main() {
 * 	pulumi.Run(func(ctx *pulumi.Context) error {
 * 		_, err := dataproc.NewBatch(ctx, "example_batch_pyspark", &dataproc.BatchArgs{
 * 			BatchId:  pulumi.String("tf-test-batch_39249"),
 * 			Location: pulumi.String("us-central1"),
 * 			RuntimeConfig: &dataproc.BatchRuntimeConfigArgs{
 * 				Properties: pulumi.StringMap{
 * 					"spark.dynamicAllocation.enabled": pulumi.String("false"),
 * 					"spark.executor.instances":        pulumi.String("2"),
 * 				},
 * 			},
 * 			EnvironmentConfig: &dataproc.BatchEnvironmentConfigArgs{
 * 				ExecutionConfig: &dataproc.BatchEnvironmentConfigExecutionConfigArgs{
 * 					SubnetworkUri: pulumi.String("default"),
 * 				},
 * 			},
 * 			PysparkBatch: &dataproc.BatchPysparkBatchArgs{
 * 				MainPythonFileUri: pulumi.String("https://storage.googleapis.com/terraform-batches/test_util.py"),
 * 				Args: pulumi.StringArray{
 * 					pulumi.String("10"),
 * 				},
 * 				JarFileUris: pulumi.StringArray{
 * 					pulumi.String("file:///usr/lib/spark/examples/jars/spark-examples.jar"),
 * 				},
 * 				PythonFileUris: pulumi.StringArray{
 * 					pulumi.String("gs://dataproc-examples/pyspark/hello-world/hello-world.py"),
 * 				},
 * 				ArchiveUris: pulumi.StringArray{
 * 					pulumi.String("https://storage.googleapis.com/terraform-batches/animals.txt.tar.gz#unpacked"),
 * 					pulumi.String("https://storage.googleapis.com/terraform-batches/animals.txt.jar"),
 * 					pulumi.String("https://storage.googleapis.com/terraform-batches/animals.txt"),
 * 				},
 * 				FileUris: pulumi.StringArray{
 * 					pulumi.String("https://storage.googleapis.com/terraform-batches/people.txt"),
 * 				},
 * 			},
 * 		})
 * 		if err != nil {
 * 			return err
 * 		}
 * 		return nil
 * 	})
 * }
 * ```
 * ```java
 * package generated_program;
 * import com.pulumi.Context;
 * import com.pulumi.Pulumi;
 * import com.pulumi.core.Output;
 * import com.pulumi.gcp.dataproc.Batch;
 * import com.pulumi.gcp.dataproc.BatchArgs;
 * import com.pulumi.gcp.dataproc.inputs.BatchRuntimeConfigArgs;
 * import com.pulumi.gcp.dataproc.inputs.BatchEnvironmentConfigArgs;
 * import com.pulumi.gcp.dataproc.inputs.BatchEnvironmentConfigExecutionConfigArgs;
 * import com.pulumi.gcp.dataproc.inputs.BatchPysparkBatchArgs;
 * import java.util.List;
 * import java.util.ArrayList;
 * import java.util.Map;
 * import java.io.File;
 * import java.nio.file.Files;
 * import java.nio.file.Paths;
 * public class App {
 *     public static void main(String[] args) {
 *         Pulumi.run(App::stack);
 *     }
 *     public static void stack(Context ctx) {
 *         var exampleBatchPyspark = new Batch("exampleBatchPyspark", BatchArgs.builder()
 *             .batchId("tf-test-batch_39249")
 *             .location("us-central1")
 *             .runtimeConfig(BatchRuntimeConfigArgs.builder()
 *                 .properties(Map.ofEntries(
 *                     Map.entry("spark.dynamicAllocation.enabled", "false"),
 *                     Map.entry("spark.executor.instances", "2")
 *                 ))
 *                 .build())
 *             .environmentConfig(BatchEnvironmentConfigArgs.builder()
 *                 .executionConfig(BatchEnvironmentConfigExecutionConfigArgs.builder()
 *                     .subnetworkUri("default")
 *                     .build())
 *                 .build())
 *             .pysparkBatch(BatchPysparkBatchArgs.builder()
 *                 .mainPythonFileUri("https://storage.googleapis.com/terraform-batches/test_util.py")
 *                 .args("10")
 *                 .jarFileUris("file:///usr/lib/spark/examples/jars/spark-examples.jar")
 *                 .pythonFileUris("gs://dataproc-examples/pyspark/hello-world/hello-world.py")
 *                 .archiveUris(
 *                     "https://storage.googleapis.com/terraform-batches/animals.txt.tar.gz#unpacked",
 *                     "https://storage.googleapis.com/terraform-batches/animals.txt.jar",
 *                     "https://storage.googleapis.com/terraform-batches/animals.txt")
 *                 .fileUris("https://storage.googleapis.com/terraform-batches/people.txt")
 *                 .build())
 *             .build());
 *     }
 * }
 * ```
 * ```yaml
 * resources:
 *   exampleBatchPyspark:
 *     type: gcp:dataproc:Batch
 *     name: example_batch_pyspark
 *     properties:
 *       batchId: tf-test-batch_39249
 *       location: us-central1
 *       runtimeConfig:
 *         properties:
 *           spark.dynamicAllocation.enabled: 'false'
 *           spark.executor.instances: '2'
 *       environmentConfig:
 *         executionConfig:
 *           subnetworkUri: default
 *       pysparkBatch:
 *         mainPythonFileUri: https://storage.googleapis.com/terraform-batches/test_util.py
 *         args:
 *           - '10'
 *         jarFileUris:
 *           - file:///usr/lib/spark/examples/jars/spark-examples.jar
 *         pythonFileUris:
 *           - gs://dataproc-examples/pyspark/hello-world/hello-world.py
 *         archiveUris:
 *           - https://storage.googleapis.com/terraform-batches/animals.txt.tar.gz#unpacked
 *           - https://storage.googleapis.com/terraform-batches/animals.txt.jar
 *           - https://storage.googleapis.com/terraform-batches/animals.txt
 *         fileUris:
 *           - https://storage.googleapis.com/terraform-batches/people.txt
 * ```
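 * The PySpark variant, again as a hedged sketch of the Kotlin DSL (entry point and resource
 * function assumed as above):
 * ```kotlin
 * import com.pulumi.kotlin.Pulumi
 *
 * fun main() {
 *     Pulumi.run {
 *         batch("example_batch_pyspark") {
 *             args {
 *                 batchId("tf-test-batch_39249")
 *                 location("us-central1")
 *                 // Runtime and environment config omitted for brevity; see the Spark sketch above.
 *                 pysparkBatch {
 *                     mainPythonFileUri("https://storage.googleapis.com/terraform-batches/test_util.py")
 *                     args("10")
 *                     jarFileUris("file:///usr/lib/spark/examples/jars/spark-examples.jar")
 *                     pythonFileUris("gs://dataproc-examples/pyspark/hello-world/hello-world.py")
 *                     archiveUris(
 *                         "https://storage.googleapis.com/terraform-batches/animals.txt.tar.gz#unpacked",
 *                         "https://storage.googleapis.com/terraform-batches/animals.txt.jar",
 *                         "https://storage.googleapis.com/terraform-batches/animals.txt",
 *                     )
 *                     fileUris("https://storage.googleapis.com/terraform-batches/people.txt")
 *                 }
 *             }
 *         }
 *     }
 * }
 * ```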
 * 
 * ### Dataproc Batch Sparkr
 * 
 * ```typescript
 * import * as pulumi from "@pulumi/pulumi";
 * import * as gcp from "@pulumi/gcp";
 * const exampleBatchSparkr = new gcp.dataproc.Batch("example_batch_sparkr", {
 *     batchId: "tf-test-batch_74391",
 *     location: "us-central1",
 *     labels: {
 *         batch_test: "terraform",
 *     },
 *     runtimeConfig: {
 *         properties: {
 *             "spark.dynamicAllocation.enabled": "false",
 *             "spark.executor.instances": "2",
 *         },
 *     },
 *     environmentConfig: {
 *         executionConfig: {
 *             subnetworkUri: "default",
 *             ttl: "3600s",
 *             networkTags: ["tag1"],
 *         },
 *     },
 *     sparkRBatch: {
 *         mainRFileUri: "https://storage.googleapis.com/terraform-batches/spark-r-flights.r",
 *         args: ["https://storage.googleapis.com/terraform-batches/flights.csv"],
 *     },
 * });
 * ```
 * ```python
 * import pulumi
 * import pulumi_gcp as gcp
 * example_batch_sparkr = gcp.dataproc.Batch("example_batch_sparkr",
 *     batch_id="tf-test-batch_74391",
 *     location="us-central1",
 *     labels={
 *         "batch_test": "terraform",
 *     },
 *     runtime_config={
 *         "properties": {
 *             "spark.dynamicAllocation.enabled": "false",
 *             "spark.executor.instances": "2",
 *         },
 *     },
 *     environment_config={
 *         "execution_config": {
 *             "subnetwork_uri": "default",
 *             "ttl": "3600s",
 *             "network_tags": ["tag1"],
 *         },
 *     },
 *     spark_r_batch={
 *         "main_r_file_uri": "https://storage.googleapis.com/terraform-batches/spark-r-flights.r",
 *         "args": ["https://storage.googleapis.com/terraform-batches/flights.csv"],
 *     })
 * ```
 * ```csharp
 * using System.Collections.Generic;
 * using System.Linq;
 * using Pulumi;
 * using Gcp = Pulumi.Gcp;
 * return await Deployment.RunAsync(() =>
 * {
 *     var exampleBatchSparkr = new Gcp.Dataproc.Batch("example_batch_sparkr", new()
 *     {
 *         BatchId = "tf-test-batch_74391",
 *         Location = "us-central1",
 *         Labels =
 *         {
 *             { "batch_test", "terraform" },
 *         },
 *         RuntimeConfig = new Gcp.Dataproc.Inputs.BatchRuntimeConfigArgs
 *         {
 *             Properties =
 *             {
 *                 { "spark.dynamicAllocation.enabled", "false" },
 *                 { "spark.executor.instances", "2" },
 *             },
 *         },
 *         EnvironmentConfig = new Gcp.Dataproc.Inputs.BatchEnvironmentConfigArgs
 *         {
 *             ExecutionConfig = new Gcp.Dataproc.Inputs.BatchEnvironmentConfigExecutionConfigArgs
 *             {
 *                 SubnetworkUri = "default",
 *                 Ttl = "3600s",
 *                 NetworkTags = new[]
 *                 {
 *                     "tag1",
 *                 },
 *             },
 *         },
 *         SparkRBatch = new Gcp.Dataproc.Inputs.BatchSparkRBatchArgs
 *         {
 *             MainRFileUri = "https://storage.googleapis.com/terraform-batches/spark-r-flights.r",
 *             Args = new[]
 *             {
 *                 "https://storage.googleapis.com/terraform-batches/flights.csv",
 *             },
 *         },
 *     });
 * });
 * ```
 * ```go
 * package main
 * import (
 * 	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/dataproc"
 * 	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
 * )
 * func main() {
 * 	pulumi.Run(func(ctx *pulumi.Context) error {
 * 		_, err := dataproc.NewBatch(ctx, "example_batch_sparkr", &dataproc.BatchArgs{
 * 			BatchId:  pulumi.String("tf-test-batch_74391"),
 * 			Location: pulumi.String("us-central1"),
 * 			Labels: pulumi.StringMap{
 * 				"batch_test": pulumi.String("terraform"),
 * 			},
 * 			RuntimeConfig: &dataproc.BatchRuntimeConfigArgs{
 * 				Properties: pulumi.StringMap{
 * 					"spark.dynamicAllocation.enabled": pulumi.String("false"),
 * 					"spark.executor.instances":        pulumi.String("2"),
 * 				},
 * 			},
 * 			EnvironmentConfig: &dataproc.BatchEnvironmentConfigArgs{
 * 				ExecutionConfig: &dataproc.BatchEnvironmentConfigExecutionConfigArgs{
 * 					SubnetworkUri: pulumi.String("default"),
 * 					Ttl:           pulumi.String("3600s"),
 * 					NetworkTags: pulumi.StringArray{
 * 						pulumi.String("tag1"),
 * 					},
 * 				},
 * 			},
 * 			SparkRBatch: &dataproc.BatchSparkRBatchArgs{
 * 				MainRFileUri: pulumi.String("https://storage.googleapis.com/terraform-batches/spark-r-flights.r"),
 * 				Args: pulumi.StringArray{
 * 					pulumi.String("https://storage.googleapis.com/terraform-batches/flights.csv"),
 * 				},
 * 			},
 * 		})
 * 		if err != nil {
 * 			return err
 * 		}
 * 		return nil
 * 	})
 * }
 * ```
 * ```java
 * package generated_program;
 * import com.pulumi.Context;
 * import com.pulumi.Pulumi;
 * import com.pulumi.core.Output;
 * import com.pulumi.gcp.dataproc.Batch;
 * import com.pulumi.gcp.dataproc.BatchArgs;
 * import com.pulumi.gcp.dataproc.inputs.BatchRuntimeConfigArgs;
 * import com.pulumi.gcp.dataproc.inputs.BatchEnvironmentConfigArgs;
 * import com.pulumi.gcp.dataproc.inputs.BatchEnvironmentConfigExecutionConfigArgs;
 * import com.pulumi.gcp.dataproc.inputs.BatchSparkRBatchArgs;
 * import java.util.List;
 * import java.util.ArrayList;
 * import java.util.Map;
 * import java.io.File;
 * import java.nio.file.Files;
 * import java.nio.file.Paths;
 * public class App {
 *     public static void main(String[] args) {
 *         Pulumi.run(App::stack);
 *     }
 *     public static void stack(Context ctx) {
 *         var exampleBatchSparkr = new Batch("exampleBatchSparkr", BatchArgs.builder()
 *             .batchId("tf-test-batch_74391")
 *             .location("us-central1")
 *             .labels(Map.of("batch_test", "terraform"))
 *             .runtimeConfig(BatchRuntimeConfigArgs.builder()
 *                 .properties(Map.ofEntries(
 *                     Map.entry("spark.dynamicAllocation.enabled", "false"),
 *                     Map.entry("spark.executor.instances", "2")
 *                 ))
 *                 .build())
 *             .environmentConfig(BatchEnvironmentConfigArgs.builder()
 *                 .executionConfig(BatchEnvironmentConfigExecutionConfigArgs.builder()
 *                     .subnetworkUri("default")
 *                     .ttl("3600s")
 *                     .networkTags("tag1")
 *                     .build())
 *                 .build())
 *             .sparkRBatch(BatchSparkRBatchArgs.builder()
 *                 .mainRFileUri("https://storage.googleapis.com/terraform-batches/spark-r-flights.r")
 *                 .args("https://storage.googleapis.com/terraform-batches/flights.csv")
 *                 .build())
 *             .build());
 *     }
 * }
 * ```
 * ```yaml
 * resources:
 *   exampleBatchSparkr:
 *     type: gcp:dataproc:Batch
 *     name: example_batch_sparkr
 *     properties:
 *       batchId: tf-test-batch_74391
 *       location: us-central1
 *       labels:
 *         batch_test: terraform
 *       runtimeConfig:
 *         properties:
 *           spark.dynamicAllocation.enabled: 'false'
 *           spark.executor.instances: '2'
 *       environmentConfig:
 *         executionConfig:
 *           subnetworkUri: default
 *           ttl: 3600s
 *           networkTags:
 *             - tag1
 *       sparkRBatch:
 *         mainRFileUri: https://storage.googleapis.com/terraform-batches/spark-r-flights.r
 *         args:
 *           - https://storage.googleapis.com/terraform-batches/flights.csv
 * ```
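 * 
 * In the Kotlin SDK the SparkR payload maps to the imported `BatchSparkRBatchArgs` input type. The
 * sketch below constructs it directly with `Output.of`; the constructor parameter names are assumed
 * to mirror the fields shown in the examples above. Inside the resource DSL it would normally be
 * supplied through a `sparkRBatch { }` block instead.
 * ```kotlin
 * import com.pulumi.core.Output
 * import com.pulumi.gcp.dataproc.kotlin.inputs.BatchSparkRBatchArgs
 * 
 * // Assumed field names, matching the SparkR example above.
 * val sparkRPayload = BatchSparkRBatchArgs(
 *     mainRFileUri = Output.of("https://storage.googleapis.com/terraform-batches/spark-r-flights.r"),
 *     args = Output.of(listOf("https://storage.googleapis.com/terraform-batches/flights.csv")),
 * )
 * ```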
 * 
 * ### Dataproc Batch Autotuning
 * 
 * ```typescript
 * import * as pulumi from "@pulumi/pulumi";
 * import * as gcp from "@pulumi/gcp";
 * const exampleBatchAutotuning = new gcp.dataproc.Batch("example_batch_autotuning", {
 *     batchId: "tf-test-batch_16511",
 *     location: "us-central1",
 *     labels: {
 *         batch_test: "terraform",
 *     },
 *     runtimeConfig: {
 *         version: "2.2",
 *         properties: {
 *             "spark.dynamicAllocation.enabled": "false",
 *             "spark.executor.instances": "2",
 *         },
 *         cohort: "tf-dataproc-batch-example",
 *         autotuningConfig: {
 *             scenarios: [
 *                 "SCALING",
 *                 "MEMORY",
 *             ],
 *         },
 *     },
 *     environmentConfig: {
 *         executionConfig: {
 *             subnetworkUri: "default",
 *             ttl: "3600s",
 *         },
 *     },
 *     sparkBatch: {
 *         mainClass: "org.apache.spark.examples.SparkPi",
 *         args: ["10"],
 *         jarFileUris: ["file:///usr/lib/spark/examples/jars/spark-examples.jar"],
 *     },
 * });
 * ```
 * ```python
 * import pulumi
 * import pulumi_gcp as gcp
 * example_batch_autotuning = gcp.dataproc.Batch("example_batch_autotuning",
 *     batch_id="tf-test-batch_16511",
 *     location="us-central1",
 *     labels={
 *         "batch_test": "terraform",
 *     },
 *     runtime_config={
 *         "version": "2.2",
 *         "properties": {
 *             "spark.dynamicAllocation.enabled": "false",
 *             "spark.executor.instances": "2",
 *         },
 *         "cohort": "tf-dataproc-batch-example",
 *         "autotuning_config": {
 *             "scenarios": [
 *                 "SCALING",
 *                 "MEMORY",
 *             ],
 *         },
 *     },
 *     environment_config={
 *         "execution_config": {
 *             "subnetwork_uri": "default",
 *             "ttl": "3600s",
 *         },
 *     },
 *     spark_batch={
 *         "main_class": "org.apache.spark.examples.SparkPi",
 *         "args": ["10"],
 *         "jar_file_uris": ["file:///usr/lib/spark/examples/jars/spark-examples.jar"],
 *     })
 * ```
 * ```csharp
 * using System.Collections.Generic;
 * using System.Linq;
 * using Pulumi;
 * using Gcp = Pulumi.Gcp;
 * return await Deployment.RunAsync(() =>
 * {
 *     var exampleBatchAutotuning = new Gcp.Dataproc.Batch("example_batch_autotuning", new()
 *     {
 *         BatchId = "tf-test-batch_16511",
 *         Location = "us-central1",
 *         Labels =
 *         {
 *             { "batch_test", "terraform" },
 *         },
 *         RuntimeConfig = new Gcp.Dataproc.Inputs.BatchRuntimeConfigArgs
 *         {
 *             Version = "2.2",
 *             Properties =
 *             {
 *                 { "spark.dynamicAllocation.enabled", "false" },
 *                 { "spark.executor.instances", "2" },
 *             },
 *             Cohort = "tf-dataproc-batch-example",
 *             AutotuningConfig = new Gcp.Dataproc.Inputs.BatchRuntimeConfigAutotuningConfigArgs
 *             {
 *                 Scenarios = new[]
 *                 {
 *                     "SCALING",
 *                     "MEMORY",
 *                 },
 *             },
 *         },
 *         EnvironmentConfig = new Gcp.Dataproc.Inputs.BatchEnvironmentConfigArgs
 *         {
 *             ExecutionConfig = new Gcp.Dataproc.Inputs.BatchEnvironmentConfigExecutionConfigArgs
 *             {
 *                 SubnetworkUri = "default",
 *                 Ttl = "3600s",
 *             },
 *         },
 *         SparkBatch = new Gcp.Dataproc.Inputs.BatchSparkBatchArgs
 *         {
 *             MainClass = "org.apache.spark.examples.SparkPi",
 *             Args = new[]
 *             {
 *                 "10",
 *             },
 *             JarFileUris = new[]
 *             {
 *                 "file:///usr/lib/spark/examples/jars/spark-examples.jar",
 *             },
 *         },
 *     });
 * });
 * ```
 * ```go
 * package main
 * import (
 * 	"github.com/pulumi/pulumi-gcp/sdk/v8/go/gcp/dataproc"
 * 	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
 * )
 * func main() {
 * 	pulumi.Run(func(ctx *pulumi.Context) error {
 * 		_, err := dataproc.NewBatch(ctx, "example_batch_autotuning", &dataproc.BatchArgs{
 * 			BatchId:  pulumi.String("tf-test-batch_16511"),
 * 			Location: pulumi.String("us-central1"),
 * 			Labels: pulumi.StringMap{
 * 				"batch_test": pulumi.String("terraform"),
 * 			},
 * 			RuntimeConfig: &dataproc.BatchRuntimeConfigArgs{
 * 				Version: pulumi.String("2.2"),
 * 				Properties: pulumi.StringMap{
 * 					"spark.dynamicAllocation.enabled": pulumi.String("false"),
 * 					"spark.executor.instances":        pulumi.String("2"),
 * 				},
 * 				Cohort: pulumi.String("tf-dataproc-batch-example"),
 * 				AutotuningConfig: &dataproc.BatchRuntimeConfigAutotuningConfigArgs{
 * 					Scenarios: pulumi.StringArray{
 * 						pulumi.String("SCALING"),
 * 						pulumi.String("MEMORY"),
 * 					},
 * 				},
 * 			},
 * 			EnvironmentConfig: &dataproc.BatchEnvironmentConfigArgs{
 * 				ExecutionConfig: &dataproc.BatchEnvironmentConfigExecutionConfigArgs{
 * 					SubnetworkUri: pulumi.String("default"),
 * 					Ttl:           pulumi.String("3600s"),
 * 				},
 * 			},
 * 			SparkBatch: &dataproc.BatchSparkBatchArgs{
 * 				MainClass: pulumi.String("org.apache.spark.examples.SparkPi"),
 * 				Args: pulumi.StringArray{
 * 					pulumi.String("10"),
 * 				},
 * 				JarFileUris: pulumi.StringArray{
 * 					pulumi.String("file:///usr/lib/spark/examples/jars/spark-examples.jar"),
 * 				},
 * 			},
 * 		})
 * 		if err != nil {
 * 			return err
 * 		}
 * 		return nil
 * 	})
 * }
 * ```
 * ```java
 * package generated_program;
 * import com.pulumi.Context;
 * import com.pulumi.Pulumi;
 * import com.pulumi.core.Output;
 * import com.pulumi.gcp.dataproc.Batch;
 * import com.pulumi.gcp.dataproc.BatchArgs;
 * import com.pulumi.gcp.dataproc.inputs.BatchRuntimeConfigArgs;
 * import com.pulumi.gcp.dataproc.inputs.BatchRuntimeConfigAutotuningConfigArgs;
 * import com.pulumi.gcp.dataproc.inputs.BatchEnvironmentConfigArgs;
 * import com.pulumi.gcp.dataproc.inputs.BatchEnvironmentConfigExecutionConfigArgs;
 * import com.pulumi.gcp.dataproc.inputs.BatchSparkBatchArgs;
 * import java.util.List;
 * import java.util.ArrayList;
 * import java.util.Map;
 * import java.io.File;
 * import java.nio.file.Files;
 * import java.nio.file.Paths;
 * public class App {
 *     public static void main(String[] args) {
 *         Pulumi.run(App::stack);
 *     }
 *     public static void stack(Context ctx) {
 *         var exampleBatchAutotuning = new Batch("exampleBatchAutotuning", BatchArgs.builder()
 *             .batchId("tf-test-batch_16511")
 *             .location("us-central1")
 *             .labels(Map.of("batch_test", "terraform"))
 *             .runtimeConfig(BatchRuntimeConfigArgs.builder()
 *                 .version("2.2")
 *                 .properties(Map.ofEntries(
 *                     Map.entry("spark.dynamicAllocation.enabled", "false"),
 *                     Map.entry("spark.executor.instances", "2")
 *                 ))
 *                 .cohort("tf-dataproc-batch-example")
 *                 .autotuningConfig(BatchRuntimeConfigAutotuningConfigArgs.builder()
 *                     .scenarios(
 *                         "SCALING",
 *                         "MEMORY")
 *                     .build())
 *                 .build())
 *             .environmentConfig(BatchEnvironmentConfigArgs.builder()
 *                 .executionConfig(BatchEnvironmentConfigExecutionConfigArgs.builder()
 *                     .subnetworkUri("default")
 *                     .ttl("3600s")
 *                     .build())
 *                 .build())
 *             .sparkBatch(BatchSparkBatchArgs.builder()
 *                 .mainClass("org.apache.spark.examples.SparkPi")
 *                 .args("10")
 *                 .jarFileUris("file:///usr/lib/spark/examples/jars/spark-examples.jar")
 *                 .build())
 *             .build());
 *     }
 * }
 * ```
 * ```yaml
 * resources:
 *   exampleBatchAutotuning:
 *     type: gcp:dataproc:Batch
 *     name: example_batch_autotuning
 *     properties:
 *       batchId: tf-test-batch_16511
 *       location: us-central1
 *       labels:
 *         batch_test: terraform
 *       runtimeConfig:
 *         version: '2.2'
 *         properties:
 *           spark.dynamicAllocation.enabled: 'false'
 *           spark.executor.instances: '2'
 *         cohort: tf-dataproc-batch-example
 *         autotuningConfig:
 *           scenarios:
 *             - SCALING
 *             - MEMORY
 *       environmentConfig:
 *         executionConfig:
 *           subnetworkUri: default
 *           ttl: 3600s
 *       sparkBatch:
 *         mainClass: org.apache.spark.examples.SparkPi
 *         args:
 *           - '10'
 *         jarFileUris:
 *           - file:///usr/lib/spark/examples/jars/spark-examples.jar
 * ```
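 * 
 * In the Kotlin SDK the autotuning settings live on the runtime configuration input type. A small
 * sketch is shown below; `BatchRuntimeConfigAutotuningConfigArgs` is assumed to be the Kotlin
 * counterpart of the Java input type used above, and the constructor parameter names are assumed to
 * mirror the fields in the examples.
 * ```kotlin
 * import com.pulumi.core.Output
 * import com.pulumi.gcp.dataproc.kotlin.inputs.BatchRuntimeConfigArgs
 * import com.pulumi.gcp.dataproc.kotlin.inputs.BatchRuntimeConfigAutotuningConfigArgs
 * 
 * // Runtime config with an autotuning cohort and scenarios, as in the example above.
 * val runtimeConfig = BatchRuntimeConfigArgs(
 *     version = Output.of("2.2"),
 *     cohort = Output.of("tf-dataproc-batch-example"),
 *     autotuningConfig = Output.of(
 *         BatchRuntimeConfigAutotuningConfigArgs(
 *             scenarios = Output.of(listOf("SCALING", "MEMORY")),
 *         ),
 *     ),
 * )
 * ```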
 * 
 * ## Import
 * Batch can be imported using any of these accepted formats:
 * * `projects/{{project}}/locations/{{location}}/batches/{{batch_id}}`
 * * `{{project}}/{{location}}/{{batch_id}}`
 * * `{{location}}/{{batch_id}}`
 * When using the `pulumi import` command, Batch can be imported using one of the formats above. For example:
 * ```sh
 * $ pulumi import gcp:dataproc/batch:Batch default projects/{{project}}/locations/{{location}}/batches/{{batch_id}}
 * ```
 * ```sh
 * $ pulumi import gcp:dataproc/batch:Batch default {{project}}/{{location}}/{{batch_id}}
 * ```
 * ```sh
 * $ pulumi import gcp:dataproc/batch:Batch default {{location}}/{{batch_id}}
 * ```
 * @property batchId The ID to use for the batch, which will become the final component of the batch's resource name.
 * This value must be 4-63 characters. Valid characters are /[a-z][0-9]-/.
 * @property environmentConfig Environment configuration for the batch execution.
 * Structure is documented below.
 * @property labels The labels to associate with this batch.
 * **Note**: This field is non-authoritative, and will only manage the labels present in your configuration.
 * Please refer to the field `effective_labels` for all of the labels present on the resource.
 * @property location The location in which the batch will be created.
 * @property project The ID of the project to which the resource belongs.
 * If it is not provided, the provider project is used.
 * @property pysparkBatch PySpark batch config.
 * Structure is documented below.
 * @property runtimeConfig Runtime configuration for the batch execution.
 * Structure is documented below.
 * @property sparkBatch Spark batch config.
 * Structure is documented below.
 * @property sparkRBatch SparkR batch config.
 * Structure is documented below.
 * @property sparkSqlBatch Spark SQL batch config.
 * Structure is documented below.
 */
public data class BatchArgs(
    public val batchId: Output<String>? = null,
    public val environmentConfig: Output<BatchEnvironmentConfigArgs>? = null,
    public val labels: Output<Map<String, String>>? = null,
    public val location: Output<String>? = null,
    public val project: Output<String>? = null,
    public val pysparkBatch: Output<BatchPysparkBatchArgs>? = null,
    public val runtimeConfig: Output<BatchRuntimeConfigArgs>? = null,
    public val sparkBatch: Output<BatchSparkBatchArgs>? = null,
    public val sparkRBatch: Output<BatchSparkRBatchArgs>? = null,
    public val sparkSqlBatch: Output<BatchSparkSqlBatchArgs>? = null,
) : ConvertibleToJava<com.pulumi.gcp.dataproc.BatchArgs> {
    override fun toJava(): com.pulumi.gcp.dataproc.BatchArgs =
        com.pulumi.gcp.dataproc.BatchArgs.builder()
            .batchId(batchId?.applyValue({ args0 -> args0 }))
            .environmentConfig(environmentConfig?.applyValue({ args0 -> args0.let({ args0 -> args0.toJava() }) }))
            .labels(labels?.applyValue({ args0 -> args0.map({ args0 -> args0.key.to(args0.value) }).toMap() }))
            .location(location?.applyValue({ args0 -> args0 }))
            .project(project?.applyValue({ args0 -> args0 }))
            .pysparkBatch(pysparkBatch?.applyValue({ args0 -> args0.let({ args0 -> args0.toJava() }) }))
            .runtimeConfig(runtimeConfig?.applyValue({ args0 -> args0.let({ args0 -> args0.toJava() }) }))
            .sparkBatch(sparkBatch?.applyValue({ args0 -> args0.let({ args0 -> args0.toJava() }) }))
            .sparkRBatch(sparkRBatch?.applyValue({ args0 -> args0.let({ args0 -> args0.toJava() }) }))
            .sparkSqlBatch(sparkSqlBatch?.applyValue({ args0 -> args0.let({ args0 -> args0.toJava() }) })).build()
}

/**
 * Builder for [BatchArgs].
 */
@PulumiTagMarker
public class BatchArgsBuilder internal constructor() {
    private var batchId: Output<String>? = null

    private var environmentConfig: Output<BatchEnvironmentConfigArgs>? = null

    private var labels: Output<Map<String, String>>? = null

    private var location: Output<String>? = null

    private var project: Output<String>? = null

    private var pysparkBatch: Output<BatchPysparkBatchArgs>? = null

    private var runtimeConfig: Output<BatchRuntimeConfigArgs>? = null

    private var sparkBatch: Output<BatchSparkBatchArgs>? = null

    private var sparkRBatch: Output<BatchSparkRBatchArgs>? = null

    private var sparkSqlBatch: Output<BatchSparkSqlBatchArgs>? = null

    /**
     * @param value The ID to use for the batch, which will become the final component of the batch's resource name.
     * This value must be 4-63 characters. Valid characters are /[a-z][0-9]-/.
     */
    @JvmName("ktrjjvuwtoknjcgn")
    public suspend fun batchId(`value`: Output<String>) {
        this.batchId = value
    }

    /**
     * @param value Environment configuration for the batch execution.
     * Structure is documented below.
     */
    @JvmName("pwfbbdsgfercplqs")
    public suspend fun environmentConfig(`value`: Output<BatchEnvironmentConfigArgs>) {
        this.environmentConfig = value
    }

    /**
     * @param value The labels to associate with this batch.
     * **Note**: This field is non-authoritative, and will only manage the labels present in your configuration.
     * Please refer to the field `effective_labels` for all of the labels present on the resource.
     */
    @JvmName("ahmkeayyrpvwkwee")
    public suspend fun labels(`value`: Output<Map<String, String>>) {
        this.labels = value
    }

    /**
     * @param value The location in which the batch will be created.
     */
    @JvmName("ueobcujpawckapqh")
    public suspend fun location(`value`: Output<String>) {
        this.location = value
    }

    /**
     * @param value The ID of the project to which the resource belongs.
     * If it is not provided, the provider project is used.
     */
    @JvmName("tayelvbbijxexjwd")
    public suspend fun project(`value`: Output<String>) {
        this.project = value
    }

    /**
     * @param value PySpark batch config.
     * Structure is documented below.
     */
    @JvmName("dkfqfwtwdcwtpsri")
    public suspend fun pysparkBatch(`value`: Output<BatchPysparkBatchArgs>) {
        this.pysparkBatch = value
    }

    /**
     * @param value Runtime configuration for the batch execution.
     * Structure is documented below.
     */
    @JvmName("yxcjudyujtkykslk")
    public suspend fun runtimeConfig(`value`: Output<BatchRuntimeConfigArgs>) {
        this.runtimeConfig = value
    }

    /**
     * @param value Spark batch config.
     * Structure is documented below.
     */
    @JvmName("btsfysfovtihtmbl")
    public suspend fun sparkBatch(`value`: Output<BatchSparkBatchArgs>) {
        this.sparkBatch = value
    }

    /**
     * @param value SparkR batch config.
     * Structure is documented below.
     */
    @JvmName("yhfffxuxvrlvowuc")
    public suspend fun sparkRBatch(`value`: Output<BatchSparkRBatchArgs>) {
        this.sparkRBatch = value
    }

    /**
     * @param value Spark SQL batch config.
     * Structure is documented below.
     */
    @JvmName("gjjwbkdfmabjpbwj")
    public suspend fun sparkSqlBatch(`value`: Output<BatchSparkSqlBatchArgs>) {
        this.sparkSqlBatch = value
    }

    /**
     * @param value The ID to use for the batch, which will become the final component of the batch's resource name.
     * This value must be 4-63 characters. Valid characters are /[a-z][0-9]-/.
     */
    @JvmName("xyvexvrelxuvcqsq")
    public suspend fun batchId(`value`: String?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.batchId = mapped
    }

    /**
     * @param value Environment configuration for the batch execution.
     * Structure is documented below.
     */
    @JvmName("yponncegjdeedxys")
    public suspend fun environmentConfig(`value`: BatchEnvironmentConfigArgs?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.environmentConfig = mapped
    }

    /**
     * @param argument Environment configuration for the batch execution.
     * Structure is documented below.
     */
    @JvmName("bklxrgsktubnpxnk")
    public suspend fun environmentConfig(argument: suspend BatchEnvironmentConfigArgsBuilder.() -> Unit) {
        val toBeMapped = BatchEnvironmentConfigArgsBuilder().applySuspend { argument() }.build()
        val mapped = of(toBeMapped)
        this.environmentConfig = mapped
    }

    /**
     * @param value The labels to associate with this batch.
     * **Note**: This field is non-authoritative, and will only manage the labels present in your configuration.
     * Please refer to the field `effective_labels` for all of the labels present on the resource.
     */
    @JvmName("woumkllpuytaybuw")
    public suspend fun labels(`value`: Map<String, String>?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.labels = mapped
    }

    /**
     * @param values The labels to associate with this batch.
     * **Note**: This field is non-authoritative, and will only manage the labels present in your configuration.
     * Please refer to the field `effective_labels` for all of the labels present on the resource.
     */
    @JvmName("bpumuivqxqdwidbm")
    public fun labels(vararg values: Pair<String, String>) {
        val toBeMapped = values.toMap()
        val mapped = toBeMapped.let({ args0 -> of(args0) })
        this.labels = mapped
    }

    /**
     * @param value The location in which the batch will be created.
     */
    @JvmName("xqtgfcfmmwdcslpo")
    public suspend fun location(`value`: String?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.location = mapped
    }

    /**
     * @param value The ID of the project to which the resource belongs.
     * If it is not provided, the provider project is used.
     */
    @JvmName("hibuybmscqctqmlh")
    public suspend fun project(`value`: String?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.project = mapped
    }

    /**
     * @param value PySpark batch config.
     * Structure is documented below.
     */
    @JvmName("kpkuteqrwfgbddlv")
    public suspend fun pysparkBatch(`value`: BatchPysparkBatchArgs?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.pysparkBatch = mapped
    }

    /**
     * @param argument PySpark batch config.
     * Structure is documented below.
     */
    @JvmName("iuukqcnhxbeykmdj")
    public suspend fun pysparkBatch(argument: suspend BatchPysparkBatchArgsBuilder.() -> Unit) {
        val toBeMapped = BatchPysparkBatchArgsBuilder().applySuspend { argument() }.build()
        val mapped = of(toBeMapped)
        this.pysparkBatch = mapped
    }

    /**
     * @param value Runtime configuration for the batch execution.
     * Structure is documented below.
     */
    @JvmName("jvddnefyamgjmhak")
    public suspend fun runtimeConfig(`value`: BatchRuntimeConfigArgs?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.runtimeConfig = mapped
    }

    /**
     * @param argument Runtime configuration for the batch execution.
     * Structure is documented below.
     */
    @JvmName("ccmrmnjqtdeovatn")
    public suspend fun runtimeConfig(argument: suspend BatchRuntimeConfigArgsBuilder.() -> Unit) {
        val toBeMapped = BatchRuntimeConfigArgsBuilder().applySuspend { argument() }.build()
        val mapped = of(toBeMapped)
        this.runtimeConfig = mapped
    }

    /**
     * @param value Spark batch config.
     * Structure is documented below.
     */
    @JvmName("ypptgsnucurcqcfm")
    public suspend fun sparkBatch(`value`: BatchSparkBatchArgs?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.sparkBatch = mapped
    }

    /**
     * @param argument Spark batch config.
     * Structure is documented below.
     */
    @JvmName("revusprogxorrppk")
    public suspend fun sparkBatch(argument: suspend BatchSparkBatchArgsBuilder.() -> Unit) {
        val toBeMapped = BatchSparkBatchArgsBuilder().applySuspend { argument() }.build()
        val mapped = of(toBeMapped)
        this.sparkBatch = mapped
    }

    /**
     * @param value SparkR batch config.
     * Structure is documented below.
     */
    @JvmName("haaumawepslicqle")
    public suspend fun sparkRBatch(`value`: BatchSparkRBatchArgs?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.sparkRBatch = mapped
    }

    /**
     * @param argument SparkR batch config.
     * Structure is documented below.
     */
    @JvmName("fnsjsjwbjqqbpxgx")
    public suspend fun sparkRBatch(argument: suspend BatchSparkRBatchArgsBuilder.() -> Unit) {
        val toBeMapped = BatchSparkRBatchArgsBuilder().applySuspend { argument() }.build()
        val mapped = of(toBeMapped)
        this.sparkRBatch = mapped
    }

    /**
     * @param value Spark SQL batch config.
     * Structure is documented below.
     */
    @JvmName("nvlxfevxtqovucbr")
    public suspend fun sparkSqlBatch(`value`: BatchSparkSqlBatchArgs?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.sparkSqlBatch = mapped
    }

    /**
     * @param argument Spark SQL batch config.
     * Structure is documented below.
     */
    @JvmName("xcskaasyhmlmcwmi")
    public suspend fun sparkSqlBatch(argument: suspend BatchSparkSqlBatchArgsBuilder.() -> Unit) {
        val toBeMapped = BatchSparkSqlBatchArgsBuilder().applySuspend { argument() }.build()
        val mapped = of(toBeMapped)
        this.sparkSqlBatch = mapped
    }

    internal fun build(): BatchArgs = BatchArgs(
        batchId = batchId,
        environmentConfig = environmentConfig,
        labels = labels,
        location = location,
        project = project,
        pysparkBatch = pysparkBatch,
        runtimeConfig = runtimeConfig,
        sparkBatch = sparkBatch,
        sparkRBatch = sparkRBatch,
        sparkSqlBatch = sparkSqlBatch,
    )
}
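
/*
 * Illustrative usage (not part of the generated API): [BatchArgs] is normally produced through the
 * type-safe builder above by the provider's resource DSL, but it can also be constructed directly
 * and converted to its Java counterpart. The id and location values below are placeholders.
 *
 *     val args = BatchArgs(
 *         batchId = Output.of("example-batch"),
 *         location = Output.of("us-central1"),
 *         labels = Output.of(mapOf("batch_test" to "terraform")),
 *     )
 *     val javaArgs: com.pulumi.gcp.dataproc.BatchArgs = args.toJava()
 */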



