// com.pulumi.azure.hdinsight.kotlin.SparkCluster.kt (Maven / Gradle / Ivy)
@file:Suppress("NAME_SHADOWING", "DEPRECATION")
package com.pulumi.azure.hdinsight.kotlin
import com.pulumi.azure.hdinsight.kotlin.outputs.SparkClusterComponentVersion
import com.pulumi.azure.hdinsight.kotlin.outputs.SparkClusterComputeIsolation
import com.pulumi.azure.hdinsight.kotlin.outputs.SparkClusterDiskEncryption
import com.pulumi.azure.hdinsight.kotlin.outputs.SparkClusterExtension
import com.pulumi.azure.hdinsight.kotlin.outputs.SparkClusterGateway
import com.pulumi.azure.hdinsight.kotlin.outputs.SparkClusterMetastores
import com.pulumi.azure.hdinsight.kotlin.outputs.SparkClusterMonitor
import com.pulumi.azure.hdinsight.kotlin.outputs.SparkClusterNetwork
import com.pulumi.azure.hdinsight.kotlin.outputs.SparkClusterPrivateLinkConfiguration
import com.pulumi.azure.hdinsight.kotlin.outputs.SparkClusterRoles
import com.pulumi.azure.hdinsight.kotlin.outputs.SparkClusterSecurityProfile
import com.pulumi.azure.hdinsight.kotlin.outputs.SparkClusterStorageAccount
import com.pulumi.azure.hdinsight.kotlin.outputs.SparkClusterStorageAccountGen2
import com.pulumi.core.Output
import com.pulumi.kotlin.KotlinCustomResource
import com.pulumi.kotlin.PulumiTagMarker
import com.pulumi.kotlin.ResourceMapper
import com.pulumi.kotlin.options.CustomResourceOptions
import com.pulumi.kotlin.options.CustomResourceOptionsBuilder
import com.pulumi.resources.Resource
import kotlin.Boolean
import kotlin.String
import kotlin.Suppress
import kotlin.Unit
import kotlin.collections.List
import kotlin.collections.Map
import com.pulumi.azure.hdinsight.kotlin.outputs.SparkClusterComponentVersion.Companion.toKotlin as sparkClusterComponentVersionToKotlin
import com.pulumi.azure.hdinsight.kotlin.outputs.SparkClusterComputeIsolation.Companion.toKotlin as sparkClusterComputeIsolationToKotlin
import com.pulumi.azure.hdinsight.kotlin.outputs.SparkClusterDiskEncryption.Companion.toKotlin as sparkClusterDiskEncryptionToKotlin
import com.pulumi.azure.hdinsight.kotlin.outputs.SparkClusterExtension.Companion.toKotlin as sparkClusterExtensionToKotlin
import com.pulumi.azure.hdinsight.kotlin.outputs.SparkClusterGateway.Companion.toKotlin as sparkClusterGatewayToKotlin
import com.pulumi.azure.hdinsight.kotlin.outputs.SparkClusterMetastores.Companion.toKotlin as sparkClusterMetastoresToKotlin
import com.pulumi.azure.hdinsight.kotlin.outputs.SparkClusterMonitor.Companion.toKotlin as sparkClusterMonitorToKotlin
import com.pulumi.azure.hdinsight.kotlin.outputs.SparkClusterNetwork.Companion.toKotlin as sparkClusterNetworkToKotlin
import com.pulumi.azure.hdinsight.kotlin.outputs.SparkClusterPrivateLinkConfiguration.Companion.toKotlin as sparkClusterPrivateLinkConfigurationToKotlin
import com.pulumi.azure.hdinsight.kotlin.outputs.SparkClusterRoles.Companion.toKotlin as sparkClusterRolesToKotlin
import com.pulumi.azure.hdinsight.kotlin.outputs.SparkClusterSecurityProfile.Companion.toKotlin as sparkClusterSecurityProfileToKotlin
import com.pulumi.azure.hdinsight.kotlin.outputs.SparkClusterStorageAccount.Companion.toKotlin as sparkClusterStorageAccountToKotlin
import com.pulumi.azure.hdinsight.kotlin.outputs.SparkClusterStorageAccountGen2.Companion.toKotlin as sparkClusterStorageAccountGen2ToKotlin
/**
 * Builder for [SparkCluster].
 */
@PulumiTagMarker
public class SparkClusterResourceBuilder internal constructor() {
    public var name: String? = null

    public var args: SparkClusterArgs = SparkClusterArgs()

    public var opts: CustomResourceOptions = CustomResourceOptions()

    /**
     * @param name The _unique_ name of the resulting resource.
     */
    public fun name(`value`: String) {
        this.name = value
    }

    /**
     * @param block The arguments to use to populate this resource's properties.
     */
    public suspend fun args(block: suspend SparkClusterArgsBuilder.() -> Unit) {
        // `also` is inline, so the suspend lambda may be invoked inside it.
        args = SparkClusterArgsBuilder().also { block(it) }.build()
    }

    /**
     * @param block A bag of options that control this resource's behavior.
     */
    public suspend fun opts(block: suspend CustomResourceOptionsBuilder.() -> Unit) {
        opts = com.pulumi.kotlin.options.CustomResourceOptions.opts(block)
    }

    internal fun build(): SparkCluster {
        val javaBacked = com.pulumi.azure.hdinsight.SparkCluster(
            name,
            args.toJava(),
            opts.toJava(),
        )
        return SparkCluster(javaBacked)
    }
}
/**
* Manages a HDInsight Spark Cluster.
* ## Example Usage
*
* ```typescript
* import * as pulumi from "@pulumi/pulumi";
* import * as azure from "@pulumi/azure";
* const example = new azure.core.ResourceGroup("example", {
* name: "example-resources",
* location: "West Europe",
* });
* const exampleAccount = new azure.storage.Account("example", {
* name: "hdinsightstor",
* resourceGroupName: example.name,
* location: example.location,
* accountTier: "Standard",
* accountReplicationType: "LRS",
* });
* const exampleContainer = new azure.storage.Container("example", {
* name: "hdinsight",
* storageAccountName: exampleAccount.name,
* containerAccessType: "private",
* });
* const exampleSparkCluster = new azure.hdinsight.SparkCluster("example", {
* name: "example-hdicluster",
* resourceGroupName: example.name,
* location: example.location,
* clusterVersion: "3.6",
* tier: "Standard",
* componentVersion: {
* spark: "2.3",
* },
* gateway: {
* username: "acctestusrgw",
* password: "Password123!",
* },
* storageAccounts: [{
* storageContainerId: exampleContainer.id,
* storageAccountKey: exampleAccount.primaryAccessKey,
* isDefault: true,
* }],
* roles: {
* headNode: {
* vmSize: "Standard_A3",
* username: "acctestusrvm",
* password: "AccTestvdSC4daf986!",
* },
* workerNode: {
* vmSize: "Standard_A3",
* username: "acctestusrvm",
* password: "AccTestvdSC4daf986!",
* targetInstanceCount: 3,
* },
* zookeeperNode: {
* vmSize: "Medium",
* username: "acctestusrvm",
* password: "AccTestvdSC4daf986!",
* },
* },
* });
* ```
* ```python
* import pulumi
* import pulumi_azure as azure
* example = azure.core.ResourceGroup("example",
* name="example-resources",
* location="West Europe")
* example_account = azure.storage.Account("example",
* name="hdinsightstor",
* resource_group_name=example.name,
* location=example.location,
* account_tier="Standard",
* account_replication_type="LRS")
* example_container = azure.storage.Container("example",
* name="hdinsight",
* storage_account_name=example_account.name,
* container_access_type="private")
* example_spark_cluster = azure.hdinsight.SparkCluster("example",
* name="example-hdicluster",
* resource_group_name=example.name,
* location=example.location,
* cluster_version="3.6",
* tier="Standard",
* component_version={
* "spark": "2.3",
* },
* gateway={
* "username": "acctestusrgw",
* "password": "Password123!",
* },
* storage_accounts=[{
* "storage_container_id": example_container.id,
* "storage_account_key": example_account.primary_access_key,
* "is_default": True,
* }],
* roles={
* "head_node": {
* "vm_size": "Standard_A3",
* "username": "acctestusrvm",
* "password": "AccTestvdSC4daf986!",
* },
* "worker_node": {
* "vm_size": "Standard_A3",
* "username": "acctestusrvm",
* "password": "AccTestvdSC4daf986!",
* "target_instance_count": 3,
* },
* "zookeeper_node": {
* "vm_size": "Medium",
* "username": "acctestusrvm",
* "password": "AccTestvdSC4daf986!",
* },
* })
* ```
* ```csharp
* using System.Collections.Generic;
* using System.Linq;
* using Pulumi;
* using Azure = Pulumi.Azure;
* return await Deployment.RunAsync(() =>
* {
* var example = new Azure.Core.ResourceGroup("example", new()
* {
* Name = "example-resources",
* Location = "West Europe",
* });
* var exampleAccount = new Azure.Storage.Account("example", new()
* {
* Name = "hdinsightstor",
* ResourceGroupName = example.Name,
* Location = example.Location,
* AccountTier = "Standard",
* AccountReplicationType = "LRS",
* });
* var exampleContainer = new Azure.Storage.Container("example", new()
* {
* Name = "hdinsight",
* StorageAccountName = exampleAccount.Name,
* ContainerAccessType = "private",
* });
* var exampleSparkCluster = new Azure.HDInsight.SparkCluster("example", new()
* {
* Name = "example-hdicluster",
* ResourceGroupName = example.Name,
* Location = example.Location,
* ClusterVersion = "3.6",
* Tier = "Standard",
* ComponentVersion = new Azure.HDInsight.Inputs.SparkClusterComponentVersionArgs
* {
* Spark = "2.3",
* },
* Gateway = new Azure.HDInsight.Inputs.SparkClusterGatewayArgs
* {
* Username = "acctestusrgw",
* Password = "Password123!",
* },
* StorageAccounts = new[]
* {
* new Azure.HDInsight.Inputs.SparkClusterStorageAccountArgs
* {
* StorageContainerId = exampleContainer.Id,
* StorageAccountKey = exampleAccount.PrimaryAccessKey,
* IsDefault = true,
* },
* },
* Roles = new Azure.HDInsight.Inputs.SparkClusterRolesArgs
* {
* HeadNode = new Azure.HDInsight.Inputs.SparkClusterRolesHeadNodeArgs
* {
* VmSize = "Standard_A3",
* Username = "acctestusrvm",
* Password = "AccTestvdSC4daf986!",
* },
* WorkerNode = new Azure.HDInsight.Inputs.SparkClusterRolesWorkerNodeArgs
* {
* VmSize = "Standard_A3",
* Username = "acctestusrvm",
* Password = "AccTestvdSC4daf986!",
* TargetInstanceCount = 3,
* },
* ZookeeperNode = new Azure.HDInsight.Inputs.SparkClusterRolesZookeeperNodeArgs
* {
* VmSize = "Medium",
* Username = "acctestusrvm",
* Password = "AccTestvdSC4daf986!",
* },
* },
* });
* });
* ```
* ```go
* package main
* import (
* "github.com/pulumi/pulumi-azure/sdk/v5/go/azure/core"
* "github.com/pulumi/pulumi-azure/sdk/v5/go/azure/hdinsight"
* "github.com/pulumi/pulumi-azure/sdk/v5/go/azure/storage"
* "github.com/pulumi/pulumi/sdk/v3/go/pulumi"
* )
* func main() {
* pulumi.Run(func(ctx *pulumi.Context) error {
* example, err := core.NewResourceGroup(ctx, "example", &core.ResourceGroupArgs{
* Name: pulumi.String("example-resources"),
* Location: pulumi.String("West Europe"),
* })
* if err != nil {
* return err
* }
* exampleAccount, err := storage.NewAccount(ctx, "example", &storage.AccountArgs{
* Name: pulumi.String("hdinsightstor"),
* ResourceGroupName: example.Name,
* Location: example.Location,
* AccountTier: pulumi.String("Standard"),
* AccountReplicationType: pulumi.String("LRS"),
* })
* if err != nil {
* return err
* }
* exampleContainer, err := storage.NewContainer(ctx, "example", &storage.ContainerArgs{
* Name: pulumi.String("hdinsight"),
* StorageAccountName: exampleAccount.Name,
* ContainerAccessType: pulumi.String("private"),
* })
* if err != nil {
* return err
* }
* _, err = hdinsight.NewSparkCluster(ctx, "example", &hdinsight.SparkClusterArgs{
* Name: pulumi.String("example-hdicluster"),
* ResourceGroupName: example.Name,
* Location: example.Location,
* ClusterVersion: pulumi.String("3.6"),
* Tier: pulumi.String("Standard"),
* ComponentVersion: &hdinsight.SparkClusterComponentVersionArgs{
* Spark: pulumi.String("2.3"),
* },
* Gateway: &hdinsight.SparkClusterGatewayArgs{
* Username: pulumi.String("acctestusrgw"),
* Password: pulumi.String("Password123!"),
* },
* StorageAccounts: hdinsight.SparkClusterStorageAccountArray{
* &hdinsight.SparkClusterStorageAccountArgs{
* StorageContainerId: exampleContainer.ID(),
* StorageAccountKey: exampleAccount.PrimaryAccessKey,
* IsDefault: pulumi.Bool(true),
* },
* },
* Roles: &hdinsight.SparkClusterRolesArgs{
* HeadNode: &hdinsight.SparkClusterRolesHeadNodeArgs{
* VmSize: pulumi.String("Standard_A3"),
* Username: pulumi.String("acctestusrvm"),
* Password: pulumi.String("AccTestvdSC4daf986!"),
* },
* WorkerNode: &hdinsight.SparkClusterRolesWorkerNodeArgs{
* VmSize: pulumi.String("Standard_A3"),
* Username: pulumi.String("acctestusrvm"),
* Password: pulumi.String("AccTestvdSC4daf986!"),
* TargetInstanceCount: pulumi.Int(3),
* },
* ZookeeperNode: &hdinsight.SparkClusterRolesZookeeperNodeArgs{
* VmSize: pulumi.String("Medium"),
* Username: pulumi.String("acctestusrvm"),
* Password: pulumi.String("AccTestvdSC4daf986!"),
* },
* },
* })
* if err != nil {
* return err
* }
* return nil
* })
* }
* ```
* ```java
* package generated_program;
* import com.pulumi.Context;
* import com.pulumi.Pulumi;
* import com.pulumi.core.Output;
* import com.pulumi.azure.core.ResourceGroup;
* import com.pulumi.azure.core.ResourceGroupArgs;
* import com.pulumi.azure.storage.Account;
* import com.pulumi.azure.storage.AccountArgs;
* import com.pulumi.azure.storage.Container;
* import com.pulumi.azure.storage.ContainerArgs;
* import com.pulumi.azure.hdinsight.SparkCluster;
* import com.pulumi.azure.hdinsight.SparkClusterArgs;
* import com.pulumi.azure.hdinsight.inputs.SparkClusterComponentVersionArgs;
* import com.pulumi.azure.hdinsight.inputs.SparkClusterGatewayArgs;
* import com.pulumi.azure.hdinsight.inputs.SparkClusterStorageAccountArgs;
* import com.pulumi.azure.hdinsight.inputs.SparkClusterRolesArgs;
* import com.pulumi.azure.hdinsight.inputs.SparkClusterRolesHeadNodeArgs;
* import com.pulumi.azure.hdinsight.inputs.SparkClusterRolesWorkerNodeArgs;
* import com.pulumi.azure.hdinsight.inputs.SparkClusterRolesZookeeperNodeArgs;
* import java.util.List;
* import java.util.ArrayList;
* import java.util.Map;
* import java.io.File;
* import java.nio.file.Files;
* import java.nio.file.Paths;
* public class App {
* public static void main(String[] args) {
* Pulumi.run(App::stack);
* }
* public static void stack(Context ctx) {
* var example = new ResourceGroup("example", ResourceGroupArgs.builder()
* .name("example-resources")
* .location("West Europe")
* .build());
* var exampleAccount = new Account("exampleAccount", AccountArgs.builder()
* .name("hdinsightstor")
* .resourceGroupName(example.name())
* .location(example.location())
* .accountTier("Standard")
* .accountReplicationType("LRS")
* .build());
* var exampleContainer = new Container("exampleContainer", ContainerArgs.builder()
* .name("hdinsight")
* .storageAccountName(exampleAccount.name())
* .containerAccessType("private")
* .build());
* var exampleSparkCluster = new SparkCluster("exampleSparkCluster", SparkClusterArgs.builder()
* .name("example-hdicluster")
* .resourceGroupName(example.name())
* .location(example.location())
* .clusterVersion("3.6")
* .tier("Standard")
* .componentVersion(SparkClusterComponentVersionArgs.builder()
* .spark("2.3")
* .build())
* .gateway(SparkClusterGatewayArgs.builder()
* .username("acctestusrgw")
* .password("Password123!")
* .build())
* .storageAccounts(SparkClusterStorageAccountArgs.builder()
* .storageContainerId(exampleContainer.id())
* .storageAccountKey(exampleAccount.primaryAccessKey())
* .isDefault(true)
* .build())
* .roles(SparkClusterRolesArgs.builder()
* .headNode(SparkClusterRolesHeadNodeArgs.builder()
* .vmSize("Standard_A3")
* .username("acctestusrvm")
* .password("AccTestvdSC4daf986!")
* .build())
* .workerNode(SparkClusterRolesWorkerNodeArgs.builder()
* .vmSize("Standard_A3")
* .username("acctestusrvm")
* .password("AccTestvdSC4daf986!")
* .targetInstanceCount(3)
* .build())
* .zookeeperNode(SparkClusterRolesZookeeperNodeArgs.builder()
* .vmSize("Medium")
* .username("acctestusrvm")
* .password("AccTestvdSC4daf986!")
* .build())
* .build())
* .build());
* }
* }
* ```
* ```yaml
* resources:
* example:
* type: azure:core:ResourceGroup
* properties:
* name: example-resources
* location: West Europe
* exampleAccount:
* type: azure:storage:Account
* name: example
* properties:
* name: hdinsightstor
* resourceGroupName: ${example.name}
* location: ${example.location}
* accountTier: Standard
* accountReplicationType: LRS
* exampleContainer:
* type: azure:storage:Container
* name: example
* properties:
* name: hdinsight
* storageAccountName: ${exampleAccount.name}
* containerAccessType: private
* exampleSparkCluster:
* type: azure:hdinsight:SparkCluster
* name: example
* properties:
* name: example-hdicluster
* resourceGroupName: ${example.name}
* location: ${example.location}
* clusterVersion: '3.6'
* tier: Standard
* componentVersion:
* spark: '2.3'
* gateway:
* username: acctestusrgw
* password: Password123!
* storageAccounts:
* - storageContainerId: ${exampleContainer.id}
* storageAccountKey: ${exampleAccount.primaryAccessKey}
* isDefault: true
* roles:
* headNode:
* vmSize: Standard_A3
* username: acctestusrvm
* password: AccTestvdSC4daf986!
* workerNode:
* vmSize: Standard_A3
* username: acctestusrvm
* password: AccTestvdSC4daf986!
* targetInstanceCount: 3
* zookeeperNode:
* vmSize: Medium
* username: acctestusrvm
* password: AccTestvdSC4daf986!
* ```
*
* ## Import
* HDInsight Spark Clusters can be imported using the `resource id`, e.g.
* ```sh
* $ pulumi import azure:hdinsight/sparkCluster:SparkCluster example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.HDInsight/clusters/cluster1
* ```
*/
public class SparkCluster internal constructor(
    override val javaResource: com.pulumi.azure.hdinsight.SparkCluster,
) : KotlinCustomResource(javaResource, SparkClusterMapper) {
    /**
     * Specifies the Version of HDInsights which should be used for this Cluster. Changing this forces a new resource to be created.
     */
    public val clusterVersion: Output<String>
        get() = javaResource.clusterVersion().applyValue({ args0 -> args0 })

    /**
     * A `component_version` block as defined below.
     */
    public val componentVersion: Output<SparkClusterComponentVersion>
        get() = javaResource.componentVersion().applyValue({ args0 ->
            args0.let({ args0 ->
                sparkClusterComponentVersionToKotlin(args0)
            })
        })

    /**
     * A `compute_isolation` block as defined below.
     */
    public val computeIsolation: Output<SparkClusterComputeIsolation>?
        get() = javaResource.computeIsolation().applyValue({ args0 ->
            args0.map({ args0 ->
                args0.let({ args0 -> sparkClusterComputeIsolationToKotlin(args0) })
            }).orElse(null)
        })

    /**
     * One or more `disk_encryption` block as defined below.
     */
    public val diskEncryptions: Output<List<SparkClusterDiskEncryption>>?
        get() = javaResource.diskEncryptions().applyValue({ args0 ->
            args0.map({ args0 ->
                args0.map({ args0 ->
                    args0.let({ args0 ->
                        sparkClusterDiskEncryptionToKotlin(args0)
                    })
                })
            }).orElse(null)
        })

    /**
     * Whether encryption in transit is enabled for this Cluster. Changing this forces a new resource to be created.
     */
    public val encryptionInTransitEnabled: Output<Boolean>?
        get() = javaResource.encryptionInTransitEnabled().applyValue({ args0 ->
            args0.map({ args0 ->
                args0
            }).orElse(null)
        })

    /**
     * An `extension` block as defined below.
     */
    public val extension: Output<SparkClusterExtension>?
        get() = javaResource.extension().applyValue({ args0 ->
            args0.map({ args0 ->
                args0.let({ args0 ->
                    sparkClusterExtensionToKotlin(args0)
                })
            }).orElse(null)
        })

    /**
     * A `gateway` block as defined below.
     */
    public val gateway: Output<SparkClusterGateway>
        get() = javaResource.gateway().applyValue({ args0 ->
            args0.let({ args0 ->
                sparkClusterGatewayToKotlin(args0)
            })
        })

    /**
     * The HTTPS Connectivity Endpoint for this HDInsight Spark Cluster.
     */
    public val httpsEndpoint: Output<String>
        get() = javaResource.httpsEndpoint().applyValue({ args0 -> args0 })

    /**
     * Specifies the Azure Region which this HDInsight Spark Cluster should exist. Changing this forces a new resource to be created.
     */
    public val location: Output<String>
        get() = javaResource.location().applyValue({ args0 -> args0 })

    /**
     * A `metastores` block as defined below.
     */
    public val metastores: Output<SparkClusterMetastores>?
        get() = javaResource.metastores().applyValue({ args0 ->
            args0.map({ args0 ->
                args0.let({ args0 ->
                    sparkClusterMetastoresToKotlin(args0)
                })
            }).orElse(null)
        })

    /**
     * A `monitor` block as defined below.
     */
    public val monitor: Output<SparkClusterMonitor>?
        get() = javaResource.monitor().applyValue({ args0 ->
            args0.map({ args0 ->
                args0.let({ args0 ->
                    sparkClusterMonitorToKotlin(args0)
                })
            }).orElse(null)
        })

    /**
     * Specifies the name for this HDInsight Spark Cluster. Changing this forces a new resource to be created.
     */
    public val name: Output<String>
        get() = javaResource.name().applyValue({ args0 -> args0 })

    /**
     * A `network` block as defined below.
     */
    public val network: Output<SparkClusterNetwork>?
        get() = javaResource.network().applyValue({ args0 ->
            args0.map({ args0 ->
                args0.let({ args0 ->
                    sparkClusterNetworkToKotlin(args0)
                })
            }).orElse(null)
        })

    /**
     * A `private_link_configuration` block as defined below.
     */
    public val privateLinkConfiguration: Output<SparkClusterPrivateLinkConfiguration>?
        get() = javaResource.privateLinkConfiguration().applyValue({ args0 ->
            args0.map({ args0 ->
                args0.let({ args0 -> sparkClusterPrivateLinkConfigurationToKotlin(args0) })
            }).orElse(null)
        })

    /**
     * Specifies the name of the Resource Group in which this HDInsight Spark Cluster should exist. Changing this forces a new resource to be created.
     */
    public val resourceGroupName: Output<String>
        get() = javaResource.resourceGroupName().applyValue({ args0 -> args0 })

    /**
     * A `roles` block as defined below.
     */
    public val roles: Output<SparkClusterRoles>
        get() = javaResource.roles().applyValue({ args0 ->
            args0.let({ args0 ->
                sparkClusterRolesToKotlin(args0)
            })
        })

    /**
     * A `security_profile` block as defined below. Changing this forces a new resource to be created.
     */
    public val securityProfile: Output<SparkClusterSecurityProfile>?
        get() = javaResource.securityProfile().applyValue({ args0 ->
            args0.map({ args0 ->
                args0.let({ args0 -> sparkClusterSecurityProfileToKotlin(args0) })
            }).orElse(null)
        })

    /**
     * The SSH Connectivity Endpoint for this HDInsight Spark Cluster.
     */
    public val sshEndpoint: Output<String>
        get() = javaResource.sshEndpoint().applyValue({ args0 -> args0 })

    /**
     * A `storage_account_gen2` block as defined below.
     */
    public val storageAccountGen2: Output<SparkClusterStorageAccountGen2>?
        get() = javaResource.storageAccountGen2().applyValue({ args0 ->
            args0.map({ args0 ->
                args0.let({ args0 -> sparkClusterStorageAccountGen2ToKotlin(args0) })
            }).orElse(null)
        })

    /**
     * One or more `storage_account` block as defined below.
     */
    public val storageAccounts: Output<List<SparkClusterStorageAccount>>?
        get() = javaResource.storageAccounts().applyValue({ args0 ->
            args0.map({ args0 ->
                args0.map({ args0 ->
                    args0.let({ args0 ->
                        sparkClusterStorageAccountToKotlin(args0)
                    })
                })
            }).orElse(null)
        })

    /**
     * A map of Tags which should be assigned to this HDInsight Spark Cluster.
     */
    public val tags: Output<Map<String, String>>?
        get() = javaResource.tags().applyValue({ args0 ->
            args0.map({ args0 ->
                args0.map({ args0 ->
                    args0.key.to(args0.value)
                }).toMap()
            }).orElse(null)
        })

    /**
     * Specifies the Tier which should be used for this HDInsight Spark Cluster. Possible values are `Standard` or `Premium`. Changing this forces a new resource to be created.
     */
    public val tier: Output<String>
        get() = javaResource.tier().applyValue({ args0 -> args0 })

    /**
     * The minimal supported TLS version. Possible values are 1.0, 1.1 or 1.2. Changing this forces a new resource to be created.
     * > **NOTE:** Starting on June 30, 2020, Azure HDInsight will enforce TLS 1.2 or later versions for all HTTPS connections. For more information, see [Azure HDInsight TLS 1.2 Enforcement](https://azure.microsoft.com/en-us/updates/azure-hdinsight-tls-12-enforcement/).
     */
    public val tlsMinVersion: Output<String>?
        get() = javaResource.tlsMinVersion().applyValue({ args0 ->
            args0.map({ args0 ->
                args0
            }).orElse(null)
        })
}
/**
 * Maps the underlying Java [com.pulumi.azure.hdinsight.SparkCluster] resource to its
 * Kotlin wrapper [SparkCluster].
 */
public object SparkClusterMapper : ResourceMapper<SparkCluster> {
    // Exact class match (not instanceOf): only the Java SparkCluster resource is mappable.
    override fun supportsMappingOfType(javaResource: Resource): Boolean =
        com.pulumi.azure.hdinsight.SparkCluster::class == javaResource::class

    override fun map(javaResource: Resource): SparkCluster = SparkCluster(
        javaResource as
            com.pulumi.azure.hdinsight.SparkCluster,
    )
}
/**
 * @see [SparkCluster].
 * @param name The _unique_ name of the resulting resource.
 * @param block Builder for [SparkCluster].
 */
public suspend fun sparkCluster(
    name: String,
    block: suspend SparkClusterResourceBuilder.() -> Unit,
): SparkCluster = SparkClusterResourceBuilder()
    .also { it.name(name) }
    .also { block(it) }
    .build()
/**
 * @see [SparkCluster].
 * @param name The _unique_ name of the resulting resource.
 */
public fun sparkCluster(name: String): SparkCluster =
    SparkClusterResourceBuilder().also { it.name(name) }.build()