com.pulumi.azure.hdinsight.kotlin.SparkClusterArgs.kt


Build cloud applications and infrastructure by combining the safety and reliability of infrastructure as code with the power of the Kotlin programming language.

@file:Suppress("NAME_SHADOWING", "DEPRECATION")

package com.pulumi.azure.hdinsight.kotlin

import com.pulumi.azure.hdinsight.SparkClusterArgs.builder
import com.pulumi.azure.hdinsight.kotlin.inputs.SparkClusterComponentVersionArgs
import com.pulumi.azure.hdinsight.kotlin.inputs.SparkClusterComponentVersionArgsBuilder
import com.pulumi.azure.hdinsight.kotlin.inputs.SparkClusterComputeIsolationArgs
import com.pulumi.azure.hdinsight.kotlin.inputs.SparkClusterComputeIsolationArgsBuilder
import com.pulumi.azure.hdinsight.kotlin.inputs.SparkClusterDiskEncryptionArgs
import com.pulumi.azure.hdinsight.kotlin.inputs.SparkClusterDiskEncryptionArgsBuilder
import com.pulumi.azure.hdinsight.kotlin.inputs.SparkClusterExtensionArgs
import com.pulumi.azure.hdinsight.kotlin.inputs.SparkClusterExtensionArgsBuilder
import com.pulumi.azure.hdinsight.kotlin.inputs.SparkClusterGatewayArgs
import com.pulumi.azure.hdinsight.kotlin.inputs.SparkClusterGatewayArgsBuilder
import com.pulumi.azure.hdinsight.kotlin.inputs.SparkClusterMetastoresArgs
import com.pulumi.azure.hdinsight.kotlin.inputs.SparkClusterMetastoresArgsBuilder
import com.pulumi.azure.hdinsight.kotlin.inputs.SparkClusterMonitorArgs
import com.pulumi.azure.hdinsight.kotlin.inputs.SparkClusterMonitorArgsBuilder
import com.pulumi.azure.hdinsight.kotlin.inputs.SparkClusterNetworkArgs
import com.pulumi.azure.hdinsight.kotlin.inputs.SparkClusterNetworkArgsBuilder
import com.pulumi.azure.hdinsight.kotlin.inputs.SparkClusterPrivateLinkConfigurationArgs
import com.pulumi.azure.hdinsight.kotlin.inputs.SparkClusterPrivateLinkConfigurationArgsBuilder
import com.pulumi.azure.hdinsight.kotlin.inputs.SparkClusterRolesArgs
import com.pulumi.azure.hdinsight.kotlin.inputs.SparkClusterRolesArgsBuilder
import com.pulumi.azure.hdinsight.kotlin.inputs.SparkClusterSecurityProfileArgs
import com.pulumi.azure.hdinsight.kotlin.inputs.SparkClusterSecurityProfileArgsBuilder
import com.pulumi.azure.hdinsight.kotlin.inputs.SparkClusterStorageAccountArgs
import com.pulumi.azure.hdinsight.kotlin.inputs.SparkClusterStorageAccountArgsBuilder
import com.pulumi.azure.hdinsight.kotlin.inputs.SparkClusterStorageAccountGen2Args
import com.pulumi.azure.hdinsight.kotlin.inputs.SparkClusterStorageAccountGen2ArgsBuilder
import com.pulumi.core.Output
import com.pulumi.core.Output.of
import com.pulumi.kotlin.ConvertibleToJava
import com.pulumi.kotlin.PulumiTagMarker
import com.pulumi.kotlin.applySuspend
import kotlin.Boolean
import kotlin.Pair
import kotlin.String
import kotlin.Suppress
import kotlin.Unit
import kotlin.collections.List
import kotlin.collections.Map
import kotlin.jvm.JvmName

/**
 * Manages an HDInsight Spark Cluster.
 * ## Example Usage
 * 
 * ```typescript
 * import * as pulumi from "@pulumi/pulumi";
 * import * as azure from "@pulumi/azure";
 * const example = new azure.core.ResourceGroup("example", {
 *     name: "example-resources",
 *     location: "West Europe",
 * });
 * const exampleAccount = new azure.storage.Account("example", {
 *     name: "hdinsightstor",
 *     resourceGroupName: example.name,
 *     location: example.location,
 *     accountTier: "Standard",
 *     accountReplicationType: "LRS",
 * });
 * const exampleContainer = new azure.storage.Container("example", {
 *     name: "hdinsight",
 *     storageAccountName: exampleAccount.name,
 *     containerAccessType: "private",
 * });
 * const exampleSparkCluster = new azure.hdinsight.SparkCluster("example", {
 *     name: "example-hdicluster",
 *     resourceGroupName: example.name,
 *     location: example.location,
 *     clusterVersion: "3.6",
 *     tier: "Standard",
 *     componentVersion: {
 *         spark: "2.3",
 *     },
 *     gateway: {
 *         username: "acctestusrgw",
 *         password: "Password123!",
 *     },
 *     storageAccounts: [{
 *         storageContainerId: exampleContainer.id,
 *         storageAccountKey: exampleAccount.primaryAccessKey,
 *         isDefault: true,
 *     }],
 *     roles: {
 *         headNode: {
 *             vmSize: "Standard_A3",
 *             username: "acctestusrvm",
 *             password: "AccTestvdSC4daf986!",
 *         },
 *         workerNode: {
 *             vmSize: "Standard_A3",
 *             username: "acctestusrvm",
 *             password: "AccTestvdSC4daf986!",
 *             targetInstanceCount: 3,
 *         },
 *         zookeeperNode: {
 *             vmSize: "Medium",
 *             username: "acctestusrvm",
 *             password: "AccTestvdSC4daf986!",
 *         },
 *     },
 * });
 * ```
 * ```python
 * import pulumi
 * import pulumi_azure as azure
 * example = azure.core.ResourceGroup("example",
 *     name="example-resources",
 *     location="West Europe")
 * example_account = azure.storage.Account("example",
 *     name="hdinsightstor",
 *     resource_group_name=example.name,
 *     location=example.location,
 *     account_tier="Standard",
 *     account_replication_type="LRS")
 * example_container = azure.storage.Container("example",
 *     name="hdinsight",
 *     storage_account_name=example_account.name,
 *     container_access_type="private")
 * example_spark_cluster = azure.hdinsight.SparkCluster("example",
 *     name="example-hdicluster",
 *     resource_group_name=example.name,
 *     location=example.location,
 *     cluster_version="3.6",
 *     tier="Standard",
 *     component_version={
 *         "spark": "2.3",
 *     },
 *     gateway={
 *         "username": "acctestusrgw",
 *         "password": "Password123!",
 *     },
 *     storage_accounts=[{
 *         "storage_container_id": example_container.id,
 *         "storage_account_key": example_account.primary_access_key,
 *         "is_default": True,
 *     }],
 *     roles={
 *         "head_node": {
 *             "vm_size": "Standard_A3",
 *             "username": "acctestusrvm",
 *             "password": "AccTestvdSC4daf986!",
 *         },
 *         "worker_node": {
 *             "vm_size": "Standard_A3",
 *             "username": "acctestusrvm",
 *             "password": "AccTestvdSC4daf986!",
 *             "target_instance_count": 3,
 *         },
 *         "zookeeper_node": {
 *             "vm_size": "Medium",
 *             "username": "acctestusrvm",
 *             "password": "AccTestvdSC4daf986!",
 *         },
 *     })
 * ```
 * ```csharp
 * using System.Collections.Generic;
 * using System.Linq;
 * using Pulumi;
 * using Azure = Pulumi.Azure;
 * return await Deployment.RunAsync(() =>
 * {
 *     var example = new Azure.Core.ResourceGroup("example", new()
 *     {
 *         Name = "example-resources",
 *         Location = "West Europe",
 *     });
 *     var exampleAccount = new Azure.Storage.Account("example", new()
 *     {
 *         Name = "hdinsightstor",
 *         ResourceGroupName = example.Name,
 *         Location = example.Location,
 *         AccountTier = "Standard",
 *         AccountReplicationType = "LRS",
 *     });
 *     var exampleContainer = new Azure.Storage.Container("example", new()
 *     {
 *         Name = "hdinsight",
 *         StorageAccountName = exampleAccount.Name,
 *         ContainerAccessType = "private",
 *     });
 *     var exampleSparkCluster = new Azure.HDInsight.SparkCluster("example", new()
 *     {
 *         Name = "example-hdicluster",
 *         ResourceGroupName = example.Name,
 *         Location = example.Location,
 *         ClusterVersion = "3.6",
 *         Tier = "Standard",
 *         ComponentVersion = new Azure.HDInsight.Inputs.SparkClusterComponentVersionArgs
 *         {
 *             Spark = "2.3",
 *         },
 *         Gateway = new Azure.HDInsight.Inputs.SparkClusterGatewayArgs
 *         {
 *             Username = "acctestusrgw",
 *             Password = "Password123!",
 *         },
 *         StorageAccounts = new[]
 *         {
 *             new Azure.HDInsight.Inputs.SparkClusterStorageAccountArgs
 *             {
 *                 StorageContainerId = exampleContainer.Id,
 *                 StorageAccountKey = exampleAccount.PrimaryAccessKey,
 *                 IsDefault = true,
 *             },
 *         },
 *         Roles = new Azure.HDInsight.Inputs.SparkClusterRolesArgs
 *         {
 *             HeadNode = new Azure.HDInsight.Inputs.SparkClusterRolesHeadNodeArgs
 *             {
 *                 VmSize = "Standard_A3",
 *                 Username = "acctestusrvm",
 *                 Password = "AccTestvdSC4daf986!",
 *             },
 *             WorkerNode = new Azure.HDInsight.Inputs.SparkClusterRolesWorkerNodeArgs
 *             {
 *                 VmSize = "Standard_A3",
 *                 Username = "acctestusrvm",
 *                 Password = "AccTestvdSC4daf986!",
 *                 TargetInstanceCount = 3,
 *             },
 *             ZookeeperNode = new Azure.HDInsight.Inputs.SparkClusterRolesZookeeperNodeArgs
 *             {
 *                 VmSize = "Medium",
 *                 Username = "acctestusrvm",
 *                 Password = "AccTestvdSC4daf986!",
 *             },
 *         },
 *     });
 * });
 * ```
 * ```go
 * package main
 * import (
 * 	"github.com/pulumi/pulumi-azure/sdk/v5/go/azure/core"
 * 	"github.com/pulumi/pulumi-azure/sdk/v5/go/azure/hdinsight"
 * 	"github.com/pulumi/pulumi-azure/sdk/v5/go/azure/storage"
 * 	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
 * )
 * func main() {
 * 	pulumi.Run(func(ctx *pulumi.Context) error {
 * 		example, err := core.NewResourceGroup(ctx, "example", &core.ResourceGroupArgs{
 * 			Name:     pulumi.String("example-resources"),
 * 			Location: pulumi.String("West Europe"),
 * 		})
 * 		if err != nil {
 * 			return err
 * 		}
 * 		exampleAccount, err := storage.NewAccount(ctx, "example", &storage.AccountArgs{
 * 			Name:                   pulumi.String("hdinsightstor"),
 * 			ResourceGroupName:      example.Name,
 * 			Location:               example.Location,
 * 			AccountTier:            pulumi.String("Standard"),
 * 			AccountReplicationType: pulumi.String("LRS"),
 * 		})
 * 		if err != nil {
 * 			return err
 * 		}
 * 		exampleContainer, err := storage.NewContainer(ctx, "example", &storage.ContainerArgs{
 * 			Name:                pulumi.String("hdinsight"),
 * 			StorageAccountName:  exampleAccount.Name,
 * 			ContainerAccessType: pulumi.String("private"),
 * 		})
 * 		if err != nil {
 * 			return err
 * 		}
 * 		_, err = hdinsight.NewSparkCluster(ctx, "example", &hdinsight.SparkClusterArgs{
 * 			Name:              pulumi.String("example-hdicluster"),
 * 			ResourceGroupName: example.Name,
 * 			Location:          example.Location,
 * 			ClusterVersion:    pulumi.String("3.6"),
 * 			Tier:              pulumi.String("Standard"),
 * 			ComponentVersion: &hdinsight.SparkClusterComponentVersionArgs{
 * 				Spark: pulumi.String("2.3"),
 * 			},
 * 			Gateway: &hdinsight.SparkClusterGatewayArgs{
 * 				Username: pulumi.String("acctestusrgw"),
 * 				Password: pulumi.String("Password123!"),
 * 			},
 * 			StorageAccounts: hdinsight.SparkClusterStorageAccountArray{
 * 				&hdinsight.SparkClusterStorageAccountArgs{
 * 					StorageContainerId: exampleContainer.ID(),
 * 					StorageAccountKey:  exampleAccount.PrimaryAccessKey,
 * 					IsDefault:          pulumi.Bool(true),
 * 				},
 * 			},
 * 			Roles: &hdinsight.SparkClusterRolesArgs{
 * 				HeadNode: &hdinsight.SparkClusterRolesHeadNodeArgs{
 * 					VmSize:   pulumi.String("Standard_A3"),
 * 					Username: pulumi.String("acctestusrvm"),
 * 					Password: pulumi.String("AccTestvdSC4daf986!"),
 * 				},
 * 				WorkerNode: &hdinsight.SparkClusterRolesWorkerNodeArgs{
 * 					VmSize:              pulumi.String("Standard_A3"),
 * 					Username:            pulumi.String("acctestusrvm"),
 * 					Password:            pulumi.String("AccTestvdSC4daf986!"),
 * 					TargetInstanceCount: pulumi.Int(3),
 * 				},
 * 				ZookeeperNode: &hdinsight.SparkClusterRolesZookeeperNodeArgs{
 * 					VmSize:   pulumi.String("Medium"),
 * 					Username: pulumi.String("acctestusrvm"),
 * 					Password: pulumi.String("AccTestvdSC4daf986!"),
 * 				},
 * 			},
 * 		})
 * 		if err != nil {
 * 			return err
 * 		}
 * 		return nil
 * 	})
 * }
 * ```
 * ```java
 * package generated_program;
 * import com.pulumi.Context;
 * import com.pulumi.Pulumi;
 * import com.pulumi.core.Output;
 * import com.pulumi.azure.core.ResourceGroup;
 * import com.pulumi.azure.core.ResourceGroupArgs;
 * import com.pulumi.azure.storage.Account;
 * import com.pulumi.azure.storage.AccountArgs;
 * import com.pulumi.azure.storage.Container;
 * import com.pulumi.azure.storage.ContainerArgs;
 * import com.pulumi.azure.hdinsight.SparkCluster;
 * import com.pulumi.azure.hdinsight.SparkClusterArgs;
 * import com.pulumi.azure.hdinsight.inputs.SparkClusterComponentVersionArgs;
 * import com.pulumi.azure.hdinsight.inputs.SparkClusterGatewayArgs;
 * import com.pulumi.azure.hdinsight.inputs.SparkClusterStorageAccountArgs;
 * import com.pulumi.azure.hdinsight.inputs.SparkClusterRolesArgs;
 * import com.pulumi.azure.hdinsight.inputs.SparkClusterRolesHeadNodeArgs;
 * import com.pulumi.azure.hdinsight.inputs.SparkClusterRolesWorkerNodeArgs;
 * import com.pulumi.azure.hdinsight.inputs.SparkClusterRolesZookeeperNodeArgs;
 * import java.util.List;
 * import java.util.ArrayList;
 * import java.util.Map;
 * import java.io.File;
 * import java.nio.file.Files;
 * import java.nio.file.Paths;
 * public class App {
 *     public static void main(String[] args) {
 *         Pulumi.run(App::stack);
 *     }
 *     public static void stack(Context ctx) {
 *         var example = new ResourceGroup("example", ResourceGroupArgs.builder()
 *             .name("example-resources")
 *             .location("West Europe")
 *             .build());
 *         var exampleAccount = new Account("exampleAccount", AccountArgs.builder()
 *             .name("hdinsightstor")
 *             .resourceGroupName(example.name())
 *             .location(example.location())
 *             .accountTier("Standard")
 *             .accountReplicationType("LRS")
 *             .build());
 *         var exampleContainer = new Container("exampleContainer", ContainerArgs.builder()
 *             .name("hdinsight")
 *             .storageAccountName(exampleAccount.name())
 *             .containerAccessType("private")
 *             .build());
 *         var exampleSparkCluster = new SparkCluster("exampleSparkCluster", SparkClusterArgs.builder()
 *             .name("example-hdicluster")
 *             .resourceGroupName(example.name())
 *             .location(example.location())
 *             .clusterVersion("3.6")
 *             .tier("Standard")
 *             .componentVersion(SparkClusterComponentVersionArgs.builder()
 *                 .spark("2.3")
 *                 .build())
 *             .gateway(SparkClusterGatewayArgs.builder()
 *                 .username("acctestusrgw")
 *                 .password("Password123!")
 *                 .build())
 *             .storageAccounts(SparkClusterStorageAccountArgs.builder()
 *                 .storageContainerId(exampleContainer.id())
 *                 .storageAccountKey(exampleAccount.primaryAccessKey())
 *                 .isDefault(true)
 *                 .build())
 *             .roles(SparkClusterRolesArgs.builder()
 *                 .headNode(SparkClusterRolesHeadNodeArgs.builder()
 *                     .vmSize("Standard_A3")
 *                     .username("acctestusrvm")
 *                     .password("AccTestvdSC4daf986!")
 *                     .build())
 *                 .workerNode(SparkClusterRolesWorkerNodeArgs.builder()
 *                     .vmSize("Standard_A3")
 *                     .username("acctestusrvm")
 *                     .password("AccTestvdSC4daf986!")
 *                     .targetInstanceCount(3)
 *                     .build())
 *                 .zookeeperNode(SparkClusterRolesZookeeperNodeArgs.builder()
 *                     .vmSize("Medium")
 *                     .username("acctestusrvm")
 *                     .password("AccTestvdSC4daf986!")
 *                     .build())
 *                 .build())
 *             .build());
 *     }
 * }
 * ```
 * ```yaml
 * resources:
 *   example:
 *     type: azure:core:ResourceGroup
 *     properties:
 *       name: example-resources
 *       location: West Europe
 *   exampleAccount:
 *     type: azure:storage:Account
 *     name: example
 *     properties:
 *       name: hdinsightstor
 *       resourceGroupName: ${example.name}
 *       location: ${example.location}
 *       accountTier: Standard
 *       accountReplicationType: LRS
 *   exampleContainer:
 *     type: azure:storage:Container
 *     name: example
 *     properties:
 *       name: hdinsight
 *       storageAccountName: ${exampleAccount.name}
 *       containerAccessType: private
 *   exampleSparkCluster:
 *     type: azure:hdinsight:SparkCluster
 *     name: example
 *     properties:
 *       name: example-hdicluster
 *       resourceGroupName: ${example.name}
 *       location: ${example.location}
 *       clusterVersion: '3.6'
 *       tier: Standard
 *       componentVersion:
 *         spark: '2.3'
 *       gateway:
 *         username: acctestusrgw
 *         password: Password123!
 *       storageAccounts:
 *         - storageContainerId: ${exampleContainer.id}
 *           storageAccountKey: ${exampleAccount.primaryAccessKey}
 *           isDefault: true
 *       roles:
 *         headNode:
 *           vmSize: Standard_A3
 *           username: acctestusrvm
 *           password: AccTestvdSC4daf986!
 *         workerNode:
 *           vmSize: Standard_A3
 *           username: acctestusrvm
 *           password: AccTestvdSC4daf986!
 *           targetInstanceCount: 3
 *         zookeeperNode:
 *           vmSize: Medium
 *           username: acctestusrvm
 *           password: AccTestvdSC4daf986!
 * ```
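 * For the Kotlin SDK itself, here is a minimal sketch of the same stack. It assumes the
 * type-safe resource DSL that pulumi-kotlin generates alongside these args classes
 * (top-level `resourceGroup`, `account`, `container`, and `sparkCluster` functions whose
 * `args` blocks are backed by builders such as [SparkClusterArgsBuilder]); the entry point,
 * the wrapper property names (`id`, `primaryAccessKey`, ...), and nested setters such as
 * `spark` are assumptions mirrored from the examples above, not verified API.
 * ```kotlin
 * import com.pulumi.azure.core.kotlin.resourceGroup
 * import com.pulumi.azure.hdinsight.kotlin.sparkCluster
 * import com.pulumi.azure.storage.kotlin.account
 * import com.pulumi.azure.storage.kotlin.container
 * import com.pulumi.kotlin.Pulumi
 *
 * fun main() {
 *     Pulumi.run { ctx ->
 *         // Resource group and storage backing the cluster.
 *         val example = resourceGroup("example") {
 *             args {
 *                 name("example-resources")
 *                 location("West Europe")
 *             }
 *         }
 *         val exampleAccount = account("example") {
 *             args {
 *                 name("hdinsightstor")
 *                 resourceGroupName(example.name)
 *                 location(example.location)
 *                 accountTier("Standard")
 *                 accountReplicationType("LRS")
 *             }
 *         }
 *         val exampleContainer = container("example") {
 *             args {
 *                 name("hdinsight")
 *                 storageAccountName(exampleAccount.name)
 *                 containerAccessType("private")
 *             }
 *         }
 *         // The Spark cluster itself; every setter below is defined on SparkClusterArgsBuilder.
 *         sparkCluster("example") {
 *             args {
 *                 name("example-hdicluster")
 *                 resourceGroupName(example.name)
 *                 location(example.location)
 *                 clusterVersion("3.6")
 *                 tier("Standard")
 *                 componentVersion { spark("2.3") }
 *                 gateway {
 *                     username("acctestusrgw")
 *                     password("Password123!")
 *                 }
 *                 storageAccounts {
 *                     storageContainerId(exampleContainer.id)
 *                     storageAccountKey(exampleAccount.primaryAccessKey)
 *                     isDefault(true)
 *                 }
 *                 roles {
 *                     headNode {
 *                         vmSize("Standard_A3")
 *                         username("acctestusrvm")
 *                         password("AccTestvdSC4daf986!")
 *                     }
 *                     workerNode {
 *                         vmSize("Standard_A3")
 *                         username("acctestusrvm")
 *                         password("AccTestvdSC4daf986!")
 *                         targetInstanceCount(3)
 *                     }
 *                     zookeeperNode {
 *                         vmSize("Medium")
 *                         username("acctestusrvm")
 *                         password("AccTestvdSC4daf986!")
 *                     }
 *                 }
 *             }
 *         }
 *     }
 * }
 * ```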
 * 
 * ## Import
 * HDInsight Spark Clusters can be imported using the `resource id`, e.g.
 * ```sh
 * $ pulumi import azure:hdinsight/sparkCluster:SparkCluster example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.HDInsight/clusters/cluster1
 * ```
 * @property clusterVersion Specifies the Version of HDInsight which should be used for this Cluster. Changing this forces a new resource to be created.
 * @property componentVersion A `component_version` block as defined below.
 * @property computeIsolation A `compute_isolation` block as defined below.
 * @property diskEncryptions One or more `disk_encryption` blocks as defined below.
 * @property encryptionInTransitEnabled Whether encryption in transit is enabled for this Cluster. Changing this forces a new resource to be created.
 * @property extension An `extension` block as defined below.
 * @property gateway A `gateway` block as defined below.
 * @property location Specifies the Azure Region in which this HDInsight Spark Cluster should exist. Changing this forces a new resource to be created.
 * @property metastores A `metastores` block as defined below.
 * @property monitor A `monitor` block as defined below.
 * @property name Specifies the name for this HDInsight Spark Cluster. Changing this forces a new resource to be created.
 * @property network A `network` block as defined below.
 * @property privateLinkConfiguration A `private_link_configuration` block as defined below.
 * @property resourceGroupName Specifies the name of the Resource Group in which this HDInsight Spark Cluster should exist. Changing this forces a new resource to be created.
 * @property roles A `roles` block as defined below.
 * @property securityProfile A `security_profile` block as defined below. Changing this forces a new resource to be created.
 * @property storageAccountGen2 A `storage_account_gen2` block as defined below.
 * @property storageAccounts One or more `storage_account` blocks as defined below.
 * @property tags A map of Tags which should be assigned to this HDInsight Spark Cluster.
 * @property tier Specifies the Tier which should be used for this HDInsight Spark Cluster. Possible values are `Standard` or `Premium`. Changing this forces a new resource to be created.
 * @property tlsMinVersion The minimal supported TLS version. Possible values are 1.0, 1.1 or 1.2. Changing this forces a new resource to be created.
 * > **NOTE:** Starting on June 30, 2020, Azure HDInsight will enforce TLS 1.2 or later versions for all HTTPS connections. For more information, see [Azure HDInsight TLS 1.2 Enforcement](https://azure.microsoft.com/en-us/updates/azure-hdinsight-tls-12-enforcement/).
 */
public data class SparkClusterArgs(
    public val clusterVersion: Output<String>? = null,
    public val componentVersion: Output<SparkClusterComponentVersionArgs>? = null,
    public val computeIsolation: Output<SparkClusterComputeIsolationArgs>? = null,
    public val diskEncryptions: Output<List<SparkClusterDiskEncryptionArgs>>? = null,
    public val encryptionInTransitEnabled: Output<Boolean>? = null,
    public val extension: Output<SparkClusterExtensionArgs>? = null,
    public val gateway: Output<SparkClusterGatewayArgs>? = null,
    public val location: Output<String>? = null,
    public val metastores: Output<SparkClusterMetastoresArgs>? = null,
    public val monitor: Output<SparkClusterMonitorArgs>? = null,
    public val name: Output<String>? = null,
    public val network: Output<SparkClusterNetworkArgs>? = null,
    public val privateLinkConfiguration: Output<SparkClusterPrivateLinkConfigurationArgs>? = null,
    public val resourceGroupName: Output<String>? = null,
    public val roles: Output<SparkClusterRolesArgs>? = null,
    public val securityProfile: Output<SparkClusterSecurityProfileArgs>? = null,
    public val storageAccountGen2: Output<SparkClusterStorageAccountGen2Args>? = null,
    public val storageAccounts: Output<List<SparkClusterStorageAccountArgs>>? = null,
    public val tags: Output<Map<String, String>>? = null,
    public val tier: Output<String>? = null,
    public val tlsMinVersion: Output<String>? = null,
) : ConvertibleToJava<com.pulumi.azure.hdinsight.SparkClusterArgs> {
    override fun toJava(): com.pulumi.azure.hdinsight.SparkClusterArgs =
        com.pulumi.azure.hdinsight.SparkClusterArgs.builder()
            .clusterVersion(clusterVersion?.applyValue({ args0 -> args0 }))
            .componentVersion(componentVersion?.applyValue({ args0 -> args0.let({ args0 -> args0.toJava() }) }))
            .computeIsolation(computeIsolation?.applyValue({ args0 -> args0.let({ args0 -> args0.toJava() }) }))
            .diskEncryptions(
                diskEncryptions?.applyValue({ args0 ->
                    args0.map({ args0 ->
                        args0.let({ args0 ->
                            args0.toJava()
                        })
                    })
                }),
            )
            .encryptionInTransitEnabled(encryptionInTransitEnabled?.applyValue({ args0 -> args0 }))
            .extension(extension?.applyValue({ args0 -> args0.let({ args0 -> args0.toJava() }) }))
            .gateway(gateway?.applyValue({ args0 -> args0.let({ args0 -> args0.toJava() }) }))
            .location(location?.applyValue({ args0 -> args0 }))
            .metastores(metastores?.applyValue({ args0 -> args0.let({ args0 -> args0.toJava() }) }))
            .monitor(monitor?.applyValue({ args0 -> args0.let({ args0 -> args0.toJava() }) }))
            .name(name?.applyValue({ args0 -> args0 }))
            .network(network?.applyValue({ args0 -> args0.let({ args0 -> args0.toJava() }) }))
            .privateLinkConfiguration(
                privateLinkConfiguration?.applyValue({ args0 ->
                    args0.let({ args0 ->
                        args0.toJava()
                    })
                }),
            )
            .resourceGroupName(resourceGroupName?.applyValue({ args0 -> args0 }))
            .roles(roles?.applyValue({ args0 -> args0.let({ args0 -> args0.toJava() }) }))
            .securityProfile(securityProfile?.applyValue({ args0 -> args0.let({ args0 -> args0.toJava() }) }))
            .storageAccountGen2(
                storageAccountGen2?.applyValue({ args0 ->
                    args0.let({ args0 ->
                        args0.toJava()
                    })
                }),
            )
            .storageAccounts(
                storageAccounts?.applyValue({ args0 ->
                    args0.map({ args0 ->
                        args0.let({ args0 ->
                            args0.toJava()
                        })
                    })
                }),
            )
            .tags(tags?.applyValue({ args0 -> args0.map({ args0 -> args0.key.to(args0.value) }).toMap() }))
            .tier(tier?.applyValue({ args0 -> args0 }))
            .tlsMinVersion(tlsMinVersion?.applyValue({ args0 -> args0 })).build()
}

/**
 * Builder for [SparkClusterArgs].
 */
@PulumiTagMarker
public class SparkClusterArgsBuilder internal constructor() {
    private var clusterVersion: Output<String>? = null

    private var componentVersion: Output<SparkClusterComponentVersionArgs>? = null

    private var computeIsolation: Output<SparkClusterComputeIsolationArgs>? = null

    private var diskEncryptions: Output<List<SparkClusterDiskEncryptionArgs>>? = null

    private var encryptionInTransitEnabled: Output<Boolean>? = null

    private var extension: Output<SparkClusterExtensionArgs>? = null

    private var gateway: Output<SparkClusterGatewayArgs>? = null

    private var location: Output<String>? = null

    private var metastores: Output<SparkClusterMetastoresArgs>? = null

    private var monitor: Output<SparkClusterMonitorArgs>? = null

    private var name: Output<String>? = null

    private var network: Output<SparkClusterNetworkArgs>? = null

    private var privateLinkConfiguration: Output<SparkClusterPrivateLinkConfigurationArgs>? = null

    private var resourceGroupName: Output<String>? = null

    private var roles: Output<SparkClusterRolesArgs>? = null

    private var securityProfile: Output<SparkClusterSecurityProfileArgs>? = null

    private var storageAccountGen2: Output<SparkClusterStorageAccountGen2Args>? = null

    private var storageAccounts: Output<List<SparkClusterStorageAccountArgs>>? = null

    private var tags: Output<Map<String, String>>? = null

    private var tier: Output<String>? = null

    private var tlsMinVersion: Output<String>? = null

    /**
     * @param value Specifies the Version of HDInsight which should be used for this Cluster. Changing this forces a new resource to be created.
     */
    @JvmName("vtcbbutsytgwyrci")
    public suspend fun clusterVersion(`value`: Output<String>) {
        this.clusterVersion = value
    }

    /**
     * @param value A `component_version` block as defined below.
     */
    @JvmName("xdicbjpbpcnqrrsa")
    public suspend fun componentVersion(`value`: Output<SparkClusterComponentVersionArgs>) {
        this.componentVersion = value
    }

    /**
     * @param value A `compute_isolation` block as defined below.
     */
    @JvmName("vcinyotgaqygadtu")
    public suspend fun computeIsolation(`value`: Output<SparkClusterComputeIsolationArgs>) {
        this.computeIsolation = value
    }

    /**
     * @param value One or more `disk_encryption` blocks as defined below.
     */
    @JvmName("qudgnjmjdeacjsnb")
    public suspend fun diskEncryptions(`value`: Output<List<SparkClusterDiskEncryptionArgs>>) {
        this.diskEncryptions = value
    }

    @JvmName("gcslvmqsyauxxrgg")
    public suspend fun diskEncryptions(vararg values: Output<SparkClusterDiskEncryptionArgs>) {
        this.diskEncryptions = Output.all(values.asList())
    }

    /**
     * @param values One or more `disk_encryption` blocks as defined below.
     */
    @JvmName("bfjgyvofggflmvvr")
    public suspend fun diskEncryptions(values: List<Output<SparkClusterDiskEncryptionArgs>>) {
        this.diskEncryptions = Output.all(values)
    }

    /**
     * @param value Whether encryption in transit is enabled for this Cluster. Changing this forces a new resource to be created.
     */
    @JvmName("tsrtoesauopjcype")
    public suspend fun encryptionInTransitEnabled(`value`: Output<Boolean>) {
        this.encryptionInTransitEnabled = value
    }

    /**
     * @param value An `extension` block as defined below.
     */
    @JvmName("vjdqlxqiffdkdgrn")
    public suspend fun extension(`value`: Output<SparkClusterExtensionArgs>) {
        this.extension = value
    }

    /**
     * @param value A `gateway` block as defined below.
     */
    @JvmName("fguksbveiikssucg")
    public suspend fun gateway(`value`: Output<SparkClusterGatewayArgs>) {
        this.gateway = value
    }

    /**
     * @param value Specifies the Azure Region in which this HDInsight Spark Cluster should exist. Changing this forces a new resource to be created.
     */
    @JvmName("jalymfohiumiosrt")
    public suspend fun location(`value`: Output<String>) {
        this.location = value
    }

    /**
     * @param value A `metastores` block as defined below.
     */
    @JvmName("vxbqjbmechflacab")
    public suspend fun metastores(`value`: Output<SparkClusterMetastoresArgs>) {
        this.metastores = value
    }

    /**
     * @param value A `monitor` block as defined below.
     */
    @JvmName("aqgbrokayhoooqwp")
    public suspend fun monitor(`value`: Output<SparkClusterMonitorArgs>) {
        this.monitor = value
    }

    /**
     * @param value Specifies the name for this HDInsight Spark Cluster. Changing this forces a new resource to be created.
     */
    @JvmName("sxhmqilokdmgjedq")
    public suspend fun name(`value`: Output<String>) {
        this.name = value
    }

    /**
     * @param value A `network` block as defined below.
     */
    @JvmName("lhnojxwlfivpiwso")
    public suspend fun network(`value`: Output<SparkClusterNetworkArgs>) {
        this.network = value
    }

    /**
     * @param value A `private_link_configuration` block as defined below.
     */
    @JvmName("uhlxskgevtwwvmlo")
    public suspend fun privateLinkConfiguration(`value`: Output<SparkClusterPrivateLinkConfigurationArgs>) {
        this.privateLinkConfiguration = value
    }

    /**
     * @param value Specifies the name of the Resource Group in which this HDInsight Spark Cluster should exist. Changing this forces a new resource to be created.
     */
    @JvmName("punlqeutavejcwhq")
    public suspend fun resourceGroupName(`value`: Output<String>) {
        this.resourceGroupName = value
    }

    /**
     * @param value A `roles` block as defined below.
     */
    @JvmName("mnbewbyrvbdsebve")
    public suspend fun roles(`value`: Output<SparkClusterRolesArgs>) {
        this.roles = value
    }

    /**
     * @param value A `security_profile` block as defined below. Changing this forces a new resource to be created.
     */
    @JvmName("huhamanjaiplopgy")
    public suspend fun securityProfile(`value`: Output<SparkClusterSecurityProfileArgs>) {
        this.securityProfile = value
    }

    /**
     * @param value A `storage_account_gen2` block as defined below.
     */
    @JvmName("kyacbdfrfccybyyf")
    public suspend fun storageAccountGen2(`value`: Output<SparkClusterStorageAccountGen2Args>) {
        this.storageAccountGen2 = value
    }

    /**
     * @param value One or more `storage_account` blocks as defined below.
     */
    @JvmName("giyfjkuvkvsnjbdv")
    public suspend fun storageAccounts(`value`: Output<List<SparkClusterStorageAccountArgs>>) {
        this.storageAccounts = value
    }

    @JvmName("mjkmskvcddhycyah")
    public suspend fun storageAccounts(vararg values: Output<SparkClusterStorageAccountArgs>) {
        this.storageAccounts = Output.all(values.asList())
    }

    /**
     * @param values One or more `storage_account` blocks as defined below.
     */
    @JvmName("xkjmgvrunallxonq")
    public suspend fun storageAccounts(values: List<Output<SparkClusterStorageAccountArgs>>) {
        this.storageAccounts = Output.all(values)
    }

    /**
     * @param value A map of Tags which should be assigned to this HDInsight Spark Cluster.
     */
    @JvmName("lipgnedvoeilrulv")
    public suspend fun tags(`value`: Output<Map<String, String>>) {
        this.tags = value
    }

    /**
     * @param value Specifies the Tier which should be used for this HDInsight Spark Cluster. Possible values are `Standard` or `Premium`. Changing this forces a new resource to be created.
     */
    @JvmName("glrrjpmwbwrlyhax")
    public suspend fun tier(`value`: Output<String>) {
        this.tier = value
    }

    /**
     * @param value The minimal supported TLS version. Possible values are 1.0, 1.1 or 1.2. Changing this forces a new resource to be created.
     * > **NOTE:** Starting on June 30, 2020, Azure HDInsight will enforce TLS 1.2 or later versions for all HTTPS connections. For more information, see [Azure HDInsight TLS 1.2 Enforcement](https://azure.microsoft.com/en-us/updates/azure-hdinsight-tls-12-enforcement/).
     */
    @JvmName("aajsrkesnarqvsnp")
    public suspend fun tlsMinVersion(`value`: Output<String>) {
        this.tlsMinVersion = value
    }

    /**
     * @param value Specifies the Version of HDInsight which should be used for this Cluster. Changing this forces a new resource to be created.
     */
    @JvmName("sbbupwqbcuhrwgil")
    public suspend fun clusterVersion(`value`: String?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.clusterVersion = mapped
    }

    /**
     * @param value A `component_version` block as defined below.
     */
    @JvmName("okkoivaywdxagilq")
    public suspend fun componentVersion(`value`: SparkClusterComponentVersionArgs?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.componentVersion = mapped
    }

    /**
     * @param argument A `component_version` block as defined below.
     */
    @JvmName("atonwbllmuajpnhn")
    public suspend fun componentVersion(argument: suspend SparkClusterComponentVersionArgsBuilder.() -> Unit) {
        val toBeMapped = SparkClusterComponentVersionArgsBuilder().applySuspend { argument() }.build()
        val mapped = of(toBeMapped)
        this.componentVersion = mapped
    }

    /**
     * @param value A `compute_isolation` block as defined below.
     */
    @JvmName("bkrakgcqptgarhyu")
    public suspend fun computeIsolation(`value`: SparkClusterComputeIsolationArgs?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.computeIsolation = mapped
    }

    /**
     * @param argument A `compute_isolation` block as defined below.
     */
    @JvmName("ntcexvpwphjhqfsg")
    public suspend fun computeIsolation(argument: suspend SparkClusterComputeIsolationArgsBuilder.() -> Unit) {
        val toBeMapped = SparkClusterComputeIsolationArgsBuilder().applySuspend { argument() }.build()
        val mapped = of(toBeMapped)
        this.computeIsolation = mapped
    }

    /**
     * @param value One or more `disk_encryption` blocks as defined below.
     */
    @JvmName("ecunnkanultcoiky")
    public suspend fun diskEncryptions(`value`: List<SparkClusterDiskEncryptionArgs>?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.diskEncryptions = mapped
    }

    /**
     * @param argument One or more `disk_encryption` blocks as defined below.
     */
    @JvmName("gjamoulqrtvnqdpp")
    public suspend fun diskEncryptions(argument: List<suspend SparkClusterDiskEncryptionArgsBuilder.() -> Unit>) {
        val toBeMapped = argument.toList().map {
            SparkClusterDiskEncryptionArgsBuilder().applySuspend {
                it()
            }.build()
        }
        val mapped = of(toBeMapped)
        this.diskEncryptions = mapped
    }

    /**
     * @param argument One or more `disk_encryption` blocks as defined below.
     */
    @JvmName("tlddopksncplnvtk")
    public suspend fun diskEncryptions(vararg argument: suspend SparkClusterDiskEncryptionArgsBuilder.() -> Unit) {
        val toBeMapped = argument.toList().map {
            SparkClusterDiskEncryptionArgsBuilder().applySuspend {
                it()
            }.build()
        }
        val mapped = of(toBeMapped)
        this.diskEncryptions = mapped
    }

    /**
     * @param argument One or more `disk_encryption` blocks as defined below.
     */
    @JvmName("dvpagufsutnvfaka")
    public suspend fun diskEncryptions(argument: suspend SparkClusterDiskEncryptionArgsBuilder.() -> Unit) {
        val toBeMapped = listOf(
            SparkClusterDiskEncryptionArgsBuilder().applySuspend {
                argument()
            }.build(),
        )
        val mapped = of(toBeMapped)
        this.diskEncryptions = mapped
    }

    /**
     * @param values One or more `disk_encryption` blocks as defined below.
     */
    @JvmName("cjibqldddsysqrop")
    public suspend fun diskEncryptions(vararg values: SparkClusterDiskEncryptionArgs) {
        val toBeMapped = values.toList()
        val mapped = toBeMapped.let({ args0 -> of(args0) })
        this.diskEncryptions = mapped
    }

    /**
     * @param value Whether encryption in transit is enabled for this Cluster. Changing this forces a new resource to be created.
     */
    @JvmName("lpbkxijsfwpkhnmu")
    public suspend fun encryptionInTransitEnabled(`value`: Boolean?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.encryptionInTransitEnabled = mapped
    }

    /**
     * @param value An `extension` block as defined below.
     */
    @JvmName("hpkdjbtmqlaqcqqr")
    public suspend fun extension(`value`: SparkClusterExtensionArgs?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.extension = mapped
    }

    /**
     * @param argument An `extension` block as defined below.
     */
    @JvmName("oeycctdhpxfymqwa")
    public suspend fun extension(argument: suspend SparkClusterExtensionArgsBuilder.() -> Unit) {
        val toBeMapped = SparkClusterExtensionArgsBuilder().applySuspend { argument() }.build()
        val mapped = of(toBeMapped)
        this.extension = mapped
    }

    /**
     * @param value A `gateway` block as defined below.
     */
    @JvmName("byxvixknekpilnwu")
    public suspend fun gateway(`value`: SparkClusterGatewayArgs?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.gateway = mapped
    }

    /**
     * @param argument A `gateway` block as defined below.
     */
    @JvmName("jakjlketklfylfnv")
    public suspend fun gateway(argument: suspend SparkClusterGatewayArgsBuilder.() -> Unit) {
        val toBeMapped = SparkClusterGatewayArgsBuilder().applySuspend { argument() }.build()
        val mapped = of(toBeMapped)
        this.gateway = mapped
    }

    /**
     * @param value Specifies the Azure Region in which this HDInsight Spark Cluster should exist. Changing this forces a new resource to be created.
     */
    @JvmName("rumobcfwojihxeia")
    public suspend fun location(`value`: String?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.location = mapped
    }

    /**
     * @param value A `metastores` block as defined below.
     */
    @JvmName("kptxdgdylpergdfs")
    public suspend fun metastores(`value`: SparkClusterMetastoresArgs?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.metastores = mapped
    }

    /**
     * @param argument A `metastores` block as defined below.
     */
    @JvmName("jhprddpsbvgexpnl")
    public suspend fun metastores(argument: suspend SparkClusterMetastoresArgsBuilder.() -> Unit) {
        val toBeMapped = SparkClusterMetastoresArgsBuilder().applySuspend { argument() }.build()
        val mapped = of(toBeMapped)
        this.metastores = mapped
    }

    /**
     * @param value A `monitor` block as defined below.
     */
    @JvmName("jujmoyqtbvrjfpue")
    public suspend fun monitor(`value`: SparkClusterMonitorArgs?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.monitor = mapped
    }

    /**
     * @param argument A `monitor` block as defined below.
     */
    @JvmName("kdxdnsxsxdaueynn")
    public suspend fun monitor(argument: suspend SparkClusterMonitorArgsBuilder.() -> Unit) {
        val toBeMapped = SparkClusterMonitorArgsBuilder().applySuspend { argument() }.build()
        val mapped = of(toBeMapped)
        this.monitor = mapped
    }

    /**
     * @param value Specifies the name for this HDInsight Spark Cluster. Changing this forces a new resource to be created.
     */
    @JvmName("fwkhetjaunggqeht")
    public suspend fun name(`value`: String?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.name = mapped
    }

    /**
     * @param value A `network` block as defined below.
     */
    @JvmName("xljoihtjroeuqkce")
    public suspend fun network(`value`: SparkClusterNetworkArgs?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.network = mapped
    }

    /**
     * @param argument A `network` block as defined below.
     */
    @JvmName("wpakwrbtlmvrybgl")
    public suspend fun network(argument: suspend SparkClusterNetworkArgsBuilder.() -> Unit) {
        val toBeMapped = SparkClusterNetworkArgsBuilder().applySuspend { argument() }.build()
        val mapped = of(toBeMapped)
        this.network = mapped
    }

    /**
     * @param value A `private_link_configuration` block as defined below.
     */
    @JvmName("vdwhtwihjosriguu")
    public suspend fun privateLinkConfiguration(`value`: SparkClusterPrivateLinkConfigurationArgs?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.privateLinkConfiguration = mapped
    }

    /**
     * @param argument A `private_link_configuration` block as defined below.
     */
    @JvmName("lrbbgyvrebvqnhbb")
    public suspend fun privateLinkConfiguration(argument: suspend SparkClusterPrivateLinkConfigurationArgsBuilder.() -> Unit) {
        val toBeMapped = SparkClusterPrivateLinkConfigurationArgsBuilder().applySuspend {
            argument()
        }.build()
        val mapped = of(toBeMapped)
        this.privateLinkConfiguration = mapped
    }

    /**
     * @param value Specifies the name of the Resource Group in which this HDInsight Spark Cluster should exist. Changing this forces a new resource to be created.
     */
    @JvmName("ustrqfqgqpjxtwhd")
    public suspend fun resourceGroupName(`value`: String?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.resourceGroupName = mapped
    }

    /**
     * @param value A `roles` block as defined below.
     */
    @JvmName("bnvjbtcuiaxiskqn")
    public suspend fun roles(`value`: SparkClusterRolesArgs?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.roles = mapped
    }

    /**
     * @param argument A `roles` block as defined below.
     */
    @JvmName("vfbpiblxpewiojna")
    public suspend fun roles(argument: suspend SparkClusterRolesArgsBuilder.() -> Unit) {
        val toBeMapped = SparkClusterRolesArgsBuilder().applySuspend { argument() }.build()
        val mapped = of(toBeMapped)
        this.roles = mapped
    }

    /**
     * @param value A `security_profile` block as defined below. Changing this forces a new resource to be created.
     */
    @JvmName("eguauqyhjsvmbqvp")
    public suspend fun securityProfile(`value`: SparkClusterSecurityProfileArgs?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.securityProfile = mapped
    }

    /**
     * @param argument A `security_profile` block as defined below. Changing this forces a new resource to be created.
     */
    @JvmName("plmduykkjcmwwkeh")
    public suspend fun securityProfile(argument: suspend SparkClusterSecurityProfileArgsBuilder.() -> Unit) {
        val toBeMapped = SparkClusterSecurityProfileArgsBuilder().applySuspend { argument() }.build()
        val mapped = of(toBeMapped)
        this.securityProfile = mapped
    }

    /**
     * @param value A `storage_account_gen2` block as defined below.
     */
    @JvmName("omxyywopwmioijxo")
    public suspend fun storageAccountGen2(`value`: SparkClusterStorageAccountGen2Args?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.storageAccountGen2 = mapped
    }

    /**
     * @param argument A `storage_account_gen2` block as defined below.
     */
    @JvmName("qwenqmustyheodmt")
    public suspend fun storageAccountGen2(argument: suspend SparkClusterStorageAccountGen2ArgsBuilder.() -> Unit) {
        val toBeMapped = SparkClusterStorageAccountGen2ArgsBuilder().applySuspend { argument() }.build()
        val mapped = of(toBeMapped)
        this.storageAccountGen2 = mapped
    }

    /**
     * @param value One or more `storage_account` blocks as defined below.
     */
    @JvmName("nwxvwkheckvaqioo")
    public suspend fun storageAccounts(`value`: List<SparkClusterStorageAccountArgs>?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.storageAccounts = mapped
    }

    /**
     * @param argument One or more `storage_account` blocks as defined below.
     */
    @JvmName("twoilaylbrpojpak")
    public suspend fun storageAccounts(argument: List<suspend SparkClusterStorageAccountArgsBuilder.() -> Unit>) {
        val toBeMapped = argument.toList().map {
            SparkClusterStorageAccountArgsBuilder().applySuspend {
                it()
            }.build()
        }
        val mapped = of(toBeMapped)
        this.storageAccounts = mapped
    }

    /**
     * @param argument One or more `storage_account` blocks as defined below.
     */
    @JvmName("fxiswwwgeaerievr")
    public suspend fun storageAccounts(vararg argument: suspend SparkClusterStorageAccountArgsBuilder.() -> Unit) {
        val toBeMapped = argument.toList().map {
            SparkClusterStorageAccountArgsBuilder().applySuspend {
                it()
            }.build()
        }
        val mapped = of(toBeMapped)
        this.storageAccounts = mapped
    }

    /**
     * @param argument One or more `storage_account` blocks as defined below.
     */
    @JvmName("tcdtnrwdqtqoqcxy")
    public suspend fun storageAccounts(argument: suspend SparkClusterStorageAccountArgsBuilder.() -> Unit) {
        val toBeMapped = listOf(
            SparkClusterStorageAccountArgsBuilder().applySuspend {
                argument()
            }.build(),
        )
        val mapped = of(toBeMapped)
        this.storageAccounts = mapped
    }

    /**
     * @param values One or more `storage_account` blocks as defined below.
     */
    @JvmName("gghfmbnujrvtymyq")
    public suspend fun storageAccounts(vararg values: SparkClusterStorageAccountArgs) {
        val toBeMapped = values.toList()
        val mapped = toBeMapped.let({ args0 -> of(args0) })
        this.storageAccounts = mapped
    }

    /**
     * @param value A map of Tags which should be assigned to this HDInsight Spark Cluster.
     */
    @JvmName("ttigifiwbwyrbchy")
    public suspend fun tags(`value`: Map<String, String>?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.tags = mapped
    }

    /**
     * @param values A map of Tags which should be assigned to this HDInsight Spark Cluster.
     */
    @JvmName("vltydapapysaroup")
    public fun tags(vararg values: Pair<String, String>) {
        val toBeMapped = values.toMap()
        val mapped = toBeMapped.let({ args0 -> of(args0) })
        this.tags = mapped
    }

    /**
     * @param value Specifies the Tier which should be used for this HDInsight Spark Cluster. Possible values are `Standard` or `Premium`. Changing this forces a new resource to be created.
     */
    @JvmName("tptvhemlrwjugvec")
    public suspend fun tier(`value`: String?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.tier = mapped
    }

    /**
     * @param value The minimal supported TLS version. Possible values are 1.0, 1.1 or 1.2. Changing this forces a new resource to be created.
     * > **NOTE:** Starting on June 30, 2020, Azure HDInsight will enforce TLS 1.2 or later versions for all HTTPS connections. For more information, see [Azure HDInsight TLS 1.2 Enforcement](https://azure.microsoft.com/en-us/updates/azure-hdinsight-tls-12-enforcement/).
     */
    @JvmName("vqygyvgvtvstanqm")
    public suspend fun tlsMinVersion(`value`: String?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.tlsMinVersion = mapped
    }

    internal fun build(): SparkClusterArgs = SparkClusterArgs(
        clusterVersion = clusterVersion,
        componentVersion = componentVersion,
        computeIsolation = computeIsolation,
        diskEncryptions = diskEncryptions,
        encryptionInTransitEnabled = encryptionInTransitEnabled,
        extension = extension,
        gateway = gateway,
        location = location,
        metastores = metastores,
        monitor = monitor,
        name = name,
        network = network,
        privateLinkConfiguration = privateLinkConfiguration,
        resourceGroupName = resourceGroupName,
        roles = roles,
        securityProfile = securityProfile,
        storageAccountGen2 = storageAccountGen2,
        storageAccounts = storageAccounts,
        tags = tags,
        tier = tier,
        tlsMinVersion = tlsMinVersion,
    )
}
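
The builder deliberately offers several equivalent spellings for list-typed inputs such as `storageAccounts`. As a hedged illustration (assuming the `sparkCluster { args { ... } }` DSL sketched in the class KDoc above, with `container1`/`account1` and the pre-built `primaryStorageArgs`/`secondaryStorageArgs` as hypothetical values), the overloads read like this at a call site:

```kotlin
// Inside the assumed sparkCluster("example") { args { ... } } block:

// 1. A single builder lambda: wrapped into a one-element list by the
//    single-argument overload defined above.
storageAccounts {
    storageContainerId(container1.id)
    storageAccountKey(account1.primaryAccessKey)
    isDefault(true)
}

// 2. Vararg builder lambdas: one block per storage account.
storageAccounts(
    {
        storageContainerId(container1.id)
        storageAccountKey(account1.primaryAccessKey)
        isDefault(true)
    },
    {
        storageContainerId(container2.id)
        storageAccountKey(account2.primaryAccessKey)
        isDefault(false)
    },
)

// 3. Pre-built SparkClusterStorageAccountArgs values, plain or Output-wrapped,
//    via the vararg and List overloads.
storageAccounts(primaryStorageArgs, secondaryStorageArgs)
```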



