
com.pulumi.azure.hdinsight.kotlin.SparkClusterArgs.kt Maven / Gradle / Ivy
Go to download
Show more of this group Show more artifacts with this name
Show all versions of pulumi-azure-kotlin Show documentation
Build cloud applications and infrastructure by combining the safety and reliability of infrastructure as code with the power of the Kotlin programming language.
@file:Suppress("NAME_SHADOWING", "DEPRECATION")
package com.pulumi.azure.hdinsight.kotlin
import com.pulumi.azure.hdinsight.SparkClusterArgs.builder
import com.pulumi.azure.hdinsight.kotlin.inputs.SparkClusterComponentVersionArgs
import com.pulumi.azure.hdinsight.kotlin.inputs.SparkClusterComponentVersionArgsBuilder
import com.pulumi.azure.hdinsight.kotlin.inputs.SparkClusterComputeIsolationArgs
import com.pulumi.azure.hdinsight.kotlin.inputs.SparkClusterComputeIsolationArgsBuilder
import com.pulumi.azure.hdinsight.kotlin.inputs.SparkClusterDiskEncryptionArgs
import com.pulumi.azure.hdinsight.kotlin.inputs.SparkClusterDiskEncryptionArgsBuilder
import com.pulumi.azure.hdinsight.kotlin.inputs.SparkClusterExtensionArgs
import com.pulumi.azure.hdinsight.kotlin.inputs.SparkClusterExtensionArgsBuilder
import com.pulumi.azure.hdinsight.kotlin.inputs.SparkClusterGatewayArgs
import com.pulumi.azure.hdinsight.kotlin.inputs.SparkClusterGatewayArgsBuilder
import com.pulumi.azure.hdinsight.kotlin.inputs.SparkClusterMetastoresArgs
import com.pulumi.azure.hdinsight.kotlin.inputs.SparkClusterMetastoresArgsBuilder
import com.pulumi.azure.hdinsight.kotlin.inputs.SparkClusterMonitorArgs
import com.pulumi.azure.hdinsight.kotlin.inputs.SparkClusterMonitorArgsBuilder
import com.pulumi.azure.hdinsight.kotlin.inputs.SparkClusterNetworkArgs
import com.pulumi.azure.hdinsight.kotlin.inputs.SparkClusterNetworkArgsBuilder
import com.pulumi.azure.hdinsight.kotlin.inputs.SparkClusterPrivateLinkConfigurationArgs
import com.pulumi.azure.hdinsight.kotlin.inputs.SparkClusterPrivateLinkConfigurationArgsBuilder
import com.pulumi.azure.hdinsight.kotlin.inputs.SparkClusterRolesArgs
import com.pulumi.azure.hdinsight.kotlin.inputs.SparkClusterRolesArgsBuilder
import com.pulumi.azure.hdinsight.kotlin.inputs.SparkClusterSecurityProfileArgs
import com.pulumi.azure.hdinsight.kotlin.inputs.SparkClusterSecurityProfileArgsBuilder
import com.pulumi.azure.hdinsight.kotlin.inputs.SparkClusterStorageAccountArgs
import com.pulumi.azure.hdinsight.kotlin.inputs.SparkClusterStorageAccountArgsBuilder
import com.pulumi.azure.hdinsight.kotlin.inputs.SparkClusterStorageAccountGen2Args
import com.pulumi.azure.hdinsight.kotlin.inputs.SparkClusterStorageAccountGen2ArgsBuilder
import com.pulumi.core.Output
import com.pulumi.core.Output.of
import com.pulumi.kotlin.ConvertibleToJava
import com.pulumi.kotlin.PulumiTagMarker
import com.pulumi.kotlin.applySuspend
import kotlin.Boolean
import kotlin.Pair
import kotlin.String
import kotlin.Suppress
import kotlin.Unit
import kotlin.collections.List
import kotlin.collections.Map
import kotlin.jvm.JvmName
/**
* Manages a HDInsight Spark Cluster.
* ## Example Usage
*
* ```typescript
* import * as pulumi from "@pulumi/pulumi";
* import * as azure from "@pulumi/azure";
* const example = new azure.core.ResourceGroup("example", {
* name: "example-resources",
* location: "West Europe",
* });
* const exampleAccount = new azure.storage.Account("example", {
* name: "hdinsightstor",
* resourceGroupName: example.name,
* location: example.location,
* accountTier: "Standard",
* accountReplicationType: "LRS",
* });
* const exampleContainer = new azure.storage.Container("example", {
* name: "hdinsight",
* storageAccountName: exampleAccount.name,
* containerAccessType: "private",
* });
* const exampleSparkCluster = new azure.hdinsight.SparkCluster("example", {
* name: "example-hdicluster",
* resourceGroupName: example.name,
* location: example.location,
* clusterVersion: "3.6",
* tier: "Standard",
* componentVersion: {
* spark: "2.3",
* },
* gateway: {
* username: "acctestusrgw",
* password: "Password123!",
* },
* storageAccounts: [{
* storageContainerId: exampleContainer.id,
* storageAccountKey: exampleAccount.primaryAccessKey,
* isDefault: true,
* }],
* roles: {
* headNode: {
* vmSize: "Standard_A3",
* username: "acctestusrvm",
* password: "AccTestvdSC4daf986!",
* },
* workerNode: {
* vmSize: "Standard_A3",
* username: "acctestusrvm",
* password: "AccTestvdSC4daf986!",
* targetInstanceCount: 3,
* },
* zookeeperNode: {
* vmSize: "Medium",
* username: "acctestusrvm",
* password: "AccTestvdSC4daf986!",
* },
* },
* });
* ```
* ```python
* import pulumi
* import pulumi_azure as azure
* example = azure.core.ResourceGroup("example",
* name="example-resources",
* location="West Europe")
* example_account = azure.storage.Account("example",
* name="hdinsightstor",
* resource_group_name=example.name,
* location=example.location,
* account_tier="Standard",
* account_replication_type="LRS")
* example_container = azure.storage.Container("example",
* name="hdinsight",
* storage_account_name=example_account.name,
* container_access_type="private")
* example_spark_cluster = azure.hdinsight.SparkCluster("example",
* name="example-hdicluster",
* resource_group_name=example.name,
* location=example.location,
* cluster_version="3.6",
* tier="Standard",
* component_version={
* "spark": "2.3",
* },
* gateway={
* "username": "acctestusrgw",
* "password": "Password123!",
* },
* storage_accounts=[{
* "storage_container_id": example_container.id,
* "storage_account_key": example_account.primary_access_key,
* "is_default": True,
* }],
* roles={
* "head_node": {
* "vm_size": "Standard_A3",
* "username": "acctestusrvm",
* "password": "AccTestvdSC4daf986!",
* },
* "worker_node": {
* "vm_size": "Standard_A3",
* "username": "acctestusrvm",
* "password": "AccTestvdSC4daf986!",
* "target_instance_count": 3,
* },
* "zookeeper_node": {
* "vm_size": "Medium",
* "username": "acctestusrvm",
* "password": "AccTestvdSC4daf986!",
* },
* })
* ```
* ```csharp
* using System.Collections.Generic;
* using System.Linq;
* using Pulumi;
* using Azure = Pulumi.Azure;
* return await Deployment.RunAsync(() =>
* {
* var example = new Azure.Core.ResourceGroup("example", new()
* {
* Name = "example-resources",
* Location = "West Europe",
* });
* var exampleAccount = new Azure.Storage.Account("example", new()
* {
* Name = "hdinsightstor",
* ResourceGroupName = example.Name,
* Location = example.Location,
* AccountTier = "Standard",
* AccountReplicationType = "LRS",
* });
* var exampleContainer = new Azure.Storage.Container("example", new()
* {
* Name = "hdinsight",
* StorageAccountName = exampleAccount.Name,
* ContainerAccessType = "private",
* });
* var exampleSparkCluster = new Azure.HDInsight.SparkCluster("example", new()
* {
* Name = "example-hdicluster",
* ResourceGroupName = example.Name,
* Location = example.Location,
* ClusterVersion = "3.6",
* Tier = "Standard",
* ComponentVersion = new Azure.HDInsight.Inputs.SparkClusterComponentVersionArgs
* {
* Spark = "2.3",
* },
* Gateway = new Azure.HDInsight.Inputs.SparkClusterGatewayArgs
* {
* Username = "acctestusrgw",
* Password = "Password123!",
* },
* StorageAccounts = new[]
* {
* new Azure.HDInsight.Inputs.SparkClusterStorageAccountArgs
* {
* StorageContainerId = exampleContainer.Id,
* StorageAccountKey = exampleAccount.PrimaryAccessKey,
* IsDefault = true,
* },
* },
* Roles = new Azure.HDInsight.Inputs.SparkClusterRolesArgs
* {
* HeadNode = new Azure.HDInsight.Inputs.SparkClusterRolesHeadNodeArgs
* {
* VmSize = "Standard_A3",
* Username = "acctestusrvm",
* Password = "AccTestvdSC4daf986!",
* },
* WorkerNode = new Azure.HDInsight.Inputs.SparkClusterRolesWorkerNodeArgs
* {
* VmSize = "Standard_A3",
* Username = "acctestusrvm",
* Password = "AccTestvdSC4daf986!",
* TargetInstanceCount = 3,
* },
* ZookeeperNode = new Azure.HDInsight.Inputs.SparkClusterRolesZookeeperNodeArgs
* {
* VmSize = "Medium",
* Username = "acctestusrvm",
* Password = "AccTestvdSC4daf986!",
* },
* },
* });
* });
* ```
* ```go
* package main
* import (
* "github.com/pulumi/pulumi-azure/sdk/v5/go/azure/core"
* "github.com/pulumi/pulumi-azure/sdk/v5/go/azure/hdinsight"
* "github.com/pulumi/pulumi-azure/sdk/v5/go/azure/storage"
* "github.com/pulumi/pulumi/sdk/v3/go/pulumi"
* )
* func main() {
* pulumi.Run(func(ctx *pulumi.Context) error {
* example, err := core.NewResourceGroup(ctx, "example", &core.ResourceGroupArgs{
* Name: pulumi.String("example-resources"),
* Location: pulumi.String("West Europe"),
* })
* if err != nil {
* return err
* }
* exampleAccount, err := storage.NewAccount(ctx, "example", &storage.AccountArgs{
* Name: pulumi.String("hdinsightstor"),
* ResourceGroupName: example.Name,
* Location: example.Location,
* AccountTier: pulumi.String("Standard"),
* AccountReplicationType: pulumi.String("LRS"),
* })
* if err != nil {
* return err
* }
* exampleContainer, err := storage.NewContainer(ctx, "example", &storage.ContainerArgs{
* Name: pulumi.String("hdinsight"),
* StorageAccountName: exampleAccount.Name,
* ContainerAccessType: pulumi.String("private"),
* })
* if err != nil {
* return err
* }
* _, err = hdinsight.NewSparkCluster(ctx, "example", &hdinsight.SparkClusterArgs{
* Name: pulumi.String("example-hdicluster"),
* ResourceGroupName: example.Name,
* Location: example.Location,
* ClusterVersion: pulumi.String("3.6"),
* Tier: pulumi.String("Standard"),
* ComponentVersion: &hdinsight.SparkClusterComponentVersionArgs{
* Spark: pulumi.String("2.3"),
* },
* Gateway: &hdinsight.SparkClusterGatewayArgs{
* Username: pulumi.String("acctestusrgw"),
* Password: pulumi.String("Password123!"),
* },
* StorageAccounts: hdinsight.SparkClusterStorageAccountArray{
* &hdinsight.SparkClusterStorageAccountArgs{
* StorageContainerId: exampleContainer.ID(),
* StorageAccountKey: exampleAccount.PrimaryAccessKey,
* IsDefault: pulumi.Bool(true),
* },
* },
* Roles: &hdinsight.SparkClusterRolesArgs{
* HeadNode: &hdinsight.SparkClusterRolesHeadNodeArgs{
* VmSize: pulumi.String("Standard_A3"),
* Username: pulumi.String("acctestusrvm"),
* Password: pulumi.String("AccTestvdSC4daf986!"),
* },
* WorkerNode: &hdinsight.SparkClusterRolesWorkerNodeArgs{
* VmSize: pulumi.String("Standard_A3"),
* Username: pulumi.String("acctestusrvm"),
* Password: pulumi.String("AccTestvdSC4daf986!"),
* TargetInstanceCount: pulumi.Int(3),
* },
* ZookeeperNode: &hdinsight.SparkClusterRolesZookeeperNodeArgs{
* VmSize: pulumi.String("Medium"),
* Username: pulumi.String("acctestusrvm"),
* Password: pulumi.String("AccTestvdSC4daf986!"),
* },
* },
* })
* if err != nil {
* return err
* }
* return nil
* })
* }
* ```
* ```java
* package generated_program;
* import com.pulumi.Context;
* import com.pulumi.Pulumi;
* import com.pulumi.core.Output;
* import com.pulumi.azure.core.ResourceGroup;
* import com.pulumi.azure.core.ResourceGroupArgs;
* import com.pulumi.azure.storage.Account;
* import com.pulumi.azure.storage.AccountArgs;
* import com.pulumi.azure.storage.Container;
* import com.pulumi.azure.storage.ContainerArgs;
* import com.pulumi.azure.hdinsight.SparkCluster;
* import com.pulumi.azure.hdinsight.SparkClusterArgs;
* import com.pulumi.azure.hdinsight.inputs.SparkClusterComponentVersionArgs;
* import com.pulumi.azure.hdinsight.inputs.SparkClusterGatewayArgs;
* import com.pulumi.azure.hdinsight.inputs.SparkClusterStorageAccountArgs;
* import com.pulumi.azure.hdinsight.inputs.SparkClusterRolesArgs;
* import com.pulumi.azure.hdinsight.inputs.SparkClusterRolesHeadNodeArgs;
* import com.pulumi.azure.hdinsight.inputs.SparkClusterRolesWorkerNodeArgs;
* import com.pulumi.azure.hdinsight.inputs.SparkClusterRolesZookeeperNodeArgs;
* import java.util.List;
* import java.util.ArrayList;
* import java.util.Map;
* import java.io.File;
* import java.nio.file.Files;
* import java.nio.file.Paths;
* public class App {
* public static void main(String[] args) {
* Pulumi.run(App::stack);
* }
* public static void stack(Context ctx) {
* var example = new ResourceGroup("example", ResourceGroupArgs.builder()
* .name("example-resources")
* .location("West Europe")
* .build());
* var exampleAccount = new Account("exampleAccount", AccountArgs.builder()
* .name("hdinsightstor")
* .resourceGroupName(example.name())
* .location(example.location())
* .accountTier("Standard")
* .accountReplicationType("LRS")
* .build());
* var exampleContainer = new Container("exampleContainer", ContainerArgs.builder()
* .name("hdinsight")
* .storageAccountName(exampleAccount.name())
* .containerAccessType("private")
* .build());
* var exampleSparkCluster = new SparkCluster("exampleSparkCluster", SparkClusterArgs.builder()
* .name("example-hdicluster")
* .resourceGroupName(example.name())
* .location(example.location())
* .clusterVersion("3.6")
* .tier("Standard")
* .componentVersion(SparkClusterComponentVersionArgs.builder()
* .spark("2.3")
* .build())
* .gateway(SparkClusterGatewayArgs.builder()
* .username("acctestusrgw")
* .password("Password123!")
* .build())
* .storageAccounts(SparkClusterStorageAccountArgs.builder()
* .storageContainerId(exampleContainer.id())
* .storageAccountKey(exampleAccount.primaryAccessKey())
* .isDefault(true)
* .build())
* .roles(SparkClusterRolesArgs.builder()
* .headNode(SparkClusterRolesHeadNodeArgs.builder()
* .vmSize("Standard_A3")
* .username("acctestusrvm")
* .password("AccTestvdSC4daf986!")
* .build())
* .workerNode(SparkClusterRolesWorkerNodeArgs.builder()
* .vmSize("Standard_A3")
* .username("acctestusrvm")
* .password("AccTestvdSC4daf986!")
* .targetInstanceCount(3)
* .build())
* .zookeeperNode(SparkClusterRolesZookeeperNodeArgs.builder()
* .vmSize("Medium")
* .username("acctestusrvm")
* .password("AccTestvdSC4daf986!")
* .build())
* .build())
* .build());
* }
* }
* ```
* ```yaml
* resources:
* example:
* type: azure:core:ResourceGroup
* properties:
* name: example-resources
* location: West Europe
* exampleAccount:
* type: azure:storage:Account
* name: example
* properties:
* name: hdinsightstor
* resourceGroupName: ${example.name}
* location: ${example.location}
* accountTier: Standard
* accountReplicationType: LRS
* exampleContainer:
* type: azure:storage:Container
* name: example
* properties:
* name: hdinsight
* storageAccountName: ${exampleAccount.name}
* containerAccessType: private
* exampleSparkCluster:
* type: azure:hdinsight:SparkCluster
* name: example
* properties:
* name: example-hdicluster
* resourceGroupName: ${example.name}
* location: ${example.location}
* clusterVersion: '3.6'
* tier: Standard
* componentVersion:
* spark: '2.3'
* gateway:
* username: acctestusrgw
* password: Password123!
* storageAccounts:
* - storageContainerId: ${exampleContainer.id}
* storageAccountKey: ${exampleAccount.primaryAccessKey}
* isDefault: true
* roles:
* headNode:
* vmSize: Standard_A3
* username: acctestusrvm
* password: AccTestvdSC4daf986!
* workerNode:
* vmSize: Standard_A3
* username: acctestusrvm
* password: AccTestvdSC4daf986!
* targetInstanceCount: 3
* zookeeperNode:
* vmSize: Medium
* username: acctestusrvm
* password: AccTestvdSC4daf986!
* ```
*
* ## Import
* HDInsight Spark Clusters can be imported using the `resource id`, e.g.
* ```sh
* $ pulumi import azure:hdinsight/sparkCluster:SparkCluster example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.HDInsight/clusters/cluster1
* ```
* @property clusterVersion Specifies the Version of HDInsights which should be used for this Cluster. Changing this forces a new resource to be created.
* @property componentVersion A `component_version` block as defined below.
* @property computeIsolation A `compute_isolation` block as defined below.
 * @property diskEncryptions One or more `disk_encryption` blocks as defined below.
* @property encryptionInTransitEnabled Whether encryption in transit is enabled for this Cluster. Changing this forces a new resource to be created.
* @property extension An `extension` block as defined below.
* @property gateway A `gateway` block as defined below.
 * @property location Specifies the Azure Region in which this HDInsight Spark Cluster should exist. Changing this forces a new resource to be created.
* @property metastores A `metastores` block as defined below.
* @property monitor A `monitor` block as defined below.
* @property name Specifies the name for this HDInsight Spark Cluster. Changing this forces a new resource to be created.
* @property network A `network` block as defined below.
* @property privateLinkConfiguration A `private_link_configuration` block as defined below.
* @property resourceGroupName Specifies the name of the Resource Group in which this HDInsight Spark Cluster should exist. Changing this forces a new resource to be created.
* @property roles A `roles` block as defined below.
* @property securityProfile A `security_profile` block as defined below. Changing this forces a new resource to be created.
* @property storageAccountGen2 A `storage_account_gen2` block as defined below.
 * @property storageAccounts One or more `storage_account` blocks as defined below.
* @property tags A map of Tags which should be assigned to this HDInsight Spark Cluster.
* @property tier Specifies the Tier which should be used for this HDInsight Spark Cluster. Possible values are `Standard` or `Premium`. Changing this forces a new resource to be created.
* @property tlsMinVersion The minimal supported TLS version. Possible values are 1.0, 1.1 or 1.2. Changing this forces a new resource to be created.
* > **NOTE:** Starting on June 30, 2020, Azure HDInsight will enforce TLS 1.2 or later versions for all HTTPS connections. For more information, see [Azure HDInsight TLS 1.2 Enforcement](https://azure.microsoft.com/en-us/updates/azure-hdinsight-tls-12-enforcement/).
*/
public data class SparkClusterArgs(
public val clusterVersion: Output? = null,
public val componentVersion: Output? = null,
public val computeIsolation: Output? = null,
public val diskEncryptions: Output>? = null,
public val encryptionInTransitEnabled: Output? = null,
public val extension: Output? = null,
public val gateway: Output? = null,
public val location: Output? = null,
public val metastores: Output? = null,
public val monitor: Output? = null,
public val name: Output? = null,
public val network: Output? = null,
public val privateLinkConfiguration: Output? = null,
public val resourceGroupName: Output? = null,
public val roles: Output? = null,
public val securityProfile: Output? = null,
public val storageAccountGen2: Output? = null,
public val storageAccounts: Output>? = null,
public val tags: Output
© 2015 - 2025 Weber Informatics LLC | Privacy Policy