com.pulumi.azurenative.synapse.kotlin.BigDataPool.kt

@file:Suppress("NAME_SHADOWING", "DEPRECATION")

package com.pulumi.azurenative.synapse.kotlin

import com.pulumi.azurenative.synapse.kotlin.outputs.AutoPausePropertiesResponse
import com.pulumi.azurenative.synapse.kotlin.outputs.AutoScalePropertiesResponse
import com.pulumi.azurenative.synapse.kotlin.outputs.DynamicExecutorAllocationResponse
import com.pulumi.azurenative.synapse.kotlin.outputs.LibraryInfoResponse
import com.pulumi.azurenative.synapse.kotlin.outputs.LibraryRequirementsResponse
import com.pulumi.azurenative.synapse.kotlin.outputs.SparkConfigPropertiesResponse
import com.pulumi.core.Output
import com.pulumi.kotlin.KotlinCustomResource
import com.pulumi.kotlin.PulumiTagMarker
import com.pulumi.kotlin.ResourceMapper
import com.pulumi.kotlin.options.CustomResourceOptions
import com.pulumi.kotlin.options.CustomResourceOptionsBuilder
import com.pulumi.resources.Resource
import kotlin.Boolean
import kotlin.Int
import kotlin.String
import kotlin.Suppress
import kotlin.Unit
import kotlin.collections.List
import kotlin.collections.Map
import com.pulumi.azurenative.synapse.kotlin.outputs.AutoPausePropertiesResponse.Companion.toKotlin as autoPausePropertiesResponseToKotlin
import com.pulumi.azurenative.synapse.kotlin.outputs.AutoScalePropertiesResponse.Companion.toKotlin as autoScalePropertiesResponseToKotlin
import com.pulumi.azurenative.synapse.kotlin.outputs.DynamicExecutorAllocationResponse.Companion.toKotlin as dynamicExecutorAllocationResponseToKotlin
import com.pulumi.azurenative.synapse.kotlin.outputs.LibraryInfoResponse.Companion.toKotlin as libraryInfoResponseToKotlin
import com.pulumi.azurenative.synapse.kotlin.outputs.LibraryRequirementsResponse.Companion.toKotlin as libraryRequirementsResponseToKotlin
import com.pulumi.azurenative.synapse.kotlin.outputs.SparkConfigPropertiesResponse.Companion.toKotlin as sparkConfigPropertiesResponseToKotlin

/**
 * Builder for [BigDataPool].
 */
@PulumiTagMarker
public class BigDataPoolResourceBuilder internal constructor() {
    public var name: String? = null

    public var args: BigDataPoolArgs = BigDataPoolArgs()

    public var opts: CustomResourceOptions = CustomResourceOptions()

    /**
     * @param name The _unique_ name of the resulting resource.
     */
    public fun name(`value`: String) {
        this.name = value
    }

    /**
     * @param block The arguments to use to populate this resource's properties.
     */
    public suspend fun args(block: suspend BigDataPoolArgsBuilder.() -> Unit) {
        val builder = BigDataPoolArgsBuilder()
        block(builder)
        this.args = builder.build()
    }

    /**
     * @param block A bag of options that control this resource's behavior.
     */
    public suspend fun opts(block: suspend CustomResourceOptionsBuilder.() -> Unit) {
        this.opts = com.pulumi.kotlin.options.CustomResourceOptions.opts(block)
    }

    internal fun build(): BigDataPool {
        val builtJavaResource = com.pulumi.azurenative.synapse.BigDataPool(
            this.name,
            this.args.toJava(),
            this.opts.toJava(),
        )
        return BigDataPool(builtJavaResource)
    }
}

/**
 * A Big Data pool
 * Azure REST API version: 2021-06-01. Prior API version in Azure Native 1.x: 2021-03-01.
 * Other available API versions: 2021-05-01, 2021-06-01-preview.
 * ## Example Usage
 * ### Create or update a Big Data pool
 * ```csharp
 * using System.Collections.Generic;
 * using System.Linq;
 * using Pulumi;
 * using AzureNative = Pulumi.AzureNative;
 * return await Deployment.RunAsync(() =>
 * {
 *     var bigDataPool = new AzureNative.Synapse.BigDataPool("bigDataPool", new()
 *     {
 *         AutoPause = new AzureNative.Synapse.Inputs.AutoPausePropertiesArgs
 *         {
 *             DelayInMinutes = 15,
 *             Enabled = true,
 *         },
 *         AutoScale = new AzureNative.Synapse.Inputs.AutoScalePropertiesArgs
 *         {
 *             Enabled = true,
 *             MaxNodeCount = 50,
 *             MinNodeCount = 3,
 *         },
 *         BigDataPoolName = "ExamplePool",
 *         DefaultSparkLogFolder = "/logs",
 *         IsAutotuneEnabled = false,
 *         LibraryRequirements = new AzureNative.Synapse.Inputs.LibraryRequirementsArgs
 *         {
 *             Content = "",
 *             Filename = "requirements.txt",
 *         },
 *         Location = "West US 2",
 *         NodeCount = 4,
 *         NodeSize = AzureNative.Synapse.NodeSize.Medium,
 *         NodeSizeFamily = AzureNative.Synapse.NodeSizeFamily.MemoryOptimized,
 *         ResourceGroupName = "ExampleResourceGroup",
 *         SparkEventsFolder = "/events",
 *         SparkVersion = "3.3",
 *         Tags =
 *         {
 *             { "key", "value" },
 *         },
 *         WorkspaceName = "ExampleWorkspace",
 *     });
 * });
 * ```
 * ```go
 * package main
 * import (
 * 	synapse "github.com/pulumi/pulumi-azure-native-sdk/synapse/v2"
 * 	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
 * )
 * func main() {
 * 	pulumi.Run(func(ctx *pulumi.Context) error {
 * 		_, err := synapse.NewBigDataPool(ctx, "bigDataPool", &synapse.BigDataPoolArgs{
 * 			AutoPause: &synapse.AutoPausePropertiesArgs{
 * 				DelayInMinutes: pulumi.Int(15),
 * 				Enabled:        pulumi.Bool(true),
 * 			},
 * 			AutoScale: &synapse.AutoScalePropertiesArgs{
 * 				Enabled:      pulumi.Bool(true),
 * 				MaxNodeCount: pulumi.Int(50),
 * 				MinNodeCount: pulumi.Int(3),
 * 			},
 * 			BigDataPoolName:       pulumi.String("ExamplePool"),
 * 			DefaultSparkLogFolder: pulumi.String("/logs"),
 * 			IsAutotuneEnabled:     pulumi.Bool(false),
 * 			LibraryRequirements: &synapse.LibraryRequirementsArgs{
 * 				Content:  pulumi.String(""),
 * 				Filename: pulumi.String("requirements.txt"),
 * 			},
 * 			Location:          pulumi.String("West US 2"),
 * 			NodeCount:         pulumi.Int(4),
 * 			NodeSize:          pulumi.String(synapse.NodeSizeMedium),
 * 			NodeSizeFamily:    pulumi.String(synapse.NodeSizeFamilyMemoryOptimized),
 * 			ResourceGroupName: pulumi.String("ExampleResourceGroup"),
 * 			SparkEventsFolder: pulumi.String("/events"),
 * 			SparkVersion:      pulumi.String("3.3"),
 * 			Tags: pulumi.StringMap{
 * 				"key": pulumi.String("value"),
 * 			},
 * 			WorkspaceName: pulumi.String("ExampleWorkspace"),
 * 		})
 * 		if err != nil {
 * 			return err
 * 		}
 * 		return nil
 * 	})
 * }
 * ```
 * ```java
 * package generated_program;
 * import com.pulumi.Context;
 * import com.pulumi.Pulumi;
 * import com.pulumi.core.Output;
 * import com.pulumi.azurenative.synapse.BigDataPool;
 * import com.pulumi.azurenative.synapse.BigDataPoolArgs;
 * import com.pulumi.azurenative.synapse.inputs.AutoPausePropertiesArgs;
 * import com.pulumi.azurenative.synapse.inputs.AutoScalePropertiesArgs;
 * import com.pulumi.azurenative.synapse.inputs.LibraryRequirementsArgs;
 * import java.util.List;
 * import java.util.ArrayList;
 * import java.util.Map;
 * import java.io.File;
 * import java.nio.file.Files;
 * import java.nio.file.Paths;
 * public class App {
 *     public static void main(String[] args) {
 *         Pulumi.run(App::stack);
 *     }
 *     public static void stack(Context ctx) {
 *         var bigDataPool = new BigDataPool("bigDataPool", BigDataPoolArgs.builder()
 *             .autoPause(AutoPausePropertiesArgs.builder()
 *                 .delayInMinutes(15)
 *                 .enabled(true)
 *                 .build())
 *             .autoScale(AutoScalePropertiesArgs.builder()
 *                 .enabled(true)
 *                 .maxNodeCount(50)
 *                 .minNodeCount(3)
 *                 .build())
 *             .bigDataPoolName("ExamplePool")
 *             .defaultSparkLogFolder("/logs")
 *             .isAutotuneEnabled(false)
 *             .libraryRequirements(LibraryRequirementsArgs.builder()
 *                 .content("")
 *                 .filename("requirements.txt")
 *                 .build())
 *             .location("West US 2")
 *             .nodeCount(4)
 *             .nodeSize("Medium")
 *             .nodeSizeFamily("MemoryOptimized")
 *             .resourceGroupName("ExampleResourceGroup")
 *             .sparkEventsFolder("/events")
 *             .sparkVersion("3.3")
 *             .tags(Map.of("key", "value"))
 *             .workspaceName("ExampleWorkspace")
 *             .build());
 *     }
 * }
 * ```
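 * The same pool in Kotlin, sketched with the `bigDataPool` type-safe builder defined in this file.
 * The nested-args builder methods (`autoPause`, `autoScale`, `libraryRequirements`, ...) are assumed
 * to mirror the Java `BigDataPoolArgs` builder above; treat this as an illustrative sketch rather
 * than generated output.
 * ```kotlin
 * import com.pulumi.azurenative.synapse.kotlin.bigDataPool
 *
 * suspend fun createBigDataPool() {
 *     // Builder methods below are assumed to mirror the BigDataPoolArgs fields.
 *     val bigDataPool = bigDataPool("bigDataPool") {
 *         args {
 *             autoPause {
 *                 delayInMinutes(15)
 *                 enabled(true)
 *             }
 *             autoScale {
 *                 enabled(true)
 *                 maxNodeCount(50)
 *                 minNodeCount(3)
 *             }
 *             bigDataPoolName("ExamplePool")
 *             defaultSparkLogFolder("/logs")
 *             isAutotuneEnabled(false)
 *             libraryRequirements {
 *                 content("")
 *                 filename("requirements.txt")
 *             }
 *             location("West US 2")
 *             nodeCount(4)
 *             nodeSize("Medium")
 *             nodeSizeFamily("MemoryOptimized")
 *             resourceGroupName("ExampleResourceGroup")
 *             sparkEventsFolder("/events")
 *             sparkVersion("3.3")
 *             tags(mapOf("key" to "value"))
 *             workspaceName("ExampleWorkspace")
 *         }
 *     }
 * }
 * ```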
 * ## Import
 * An existing resource can be imported using its type token, name, and identifier, e.g.
 * ```sh
 * $ pulumi import azure-native:synapse:BigDataPool ExamplePool /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/bigDataPools/{bigDataPoolName}
 * ```
 */
public class BigDataPool internal constructor(
    override val javaResource: com.pulumi.azurenative.synapse.BigDataPool,
) : KotlinCustomResource(javaResource, BigDataPoolMapper) {
    /**
     * Auto-pausing properties
     */
    public val autoPause: Output<AutoPausePropertiesResponse>?
        get() = javaResource.autoPause().applyValue({ args0 ->
            args0.map({ args0 ->
                args0.let({ args0 ->
                    autoPausePropertiesResponseToKotlin(args0)
                })
            }).orElse(null)
        })

    /**
     * Auto-scaling properties
     */
    public val autoScale: Output<AutoScalePropertiesResponse>?
        get() = javaResource.autoScale().applyValue({ args0 ->
            args0.map({ args0 ->
                args0.let({ args0 ->
                    autoScalePropertiesResponseToKotlin(args0)
                })
            }).orElse(null)
        })

    /**
     * The cache size
     */
    public val cacheSize: Output<Int>?
        get() = javaResource.cacheSize().applyValue({ args0 -> args0.map({ args0 -> args0 }).orElse(null) })

    /**
     * The time when the Big Data pool was created.
     */
    public val creationDate: Output<String>
        get() = javaResource.creationDate().applyValue({ args0 -> args0 })

    /**
     * List of custom libraries/packages associated with the Spark pool.
     */
    public val customLibraries: Output<List<LibraryInfoResponse>>?
        get() = javaResource.customLibraries().applyValue({ args0 ->
            args0.map({ args0 ->
                args0.map({ args0 ->
                    args0.let({ args0 ->
                        libraryInfoResponseToKotlin(args0)
                    })
                })
            }).orElse(null)
        })

    /**
     * The default folder where Spark logs will be written.
     */
    public val defaultSparkLogFolder: Output<String>?
        get() = javaResource.defaultSparkLogFolder().applyValue({ args0 ->
            args0.map({ args0 ->
                args0
            }).orElse(null)
        })

    /**
     * Dynamic Executor Allocation
     */
    public val dynamicExecutorAllocation: Output<DynamicExecutorAllocationResponse>?
        get() = javaResource.dynamicExecutorAllocation().applyValue({ args0 ->
            args0.map({ args0 ->
                args0.let({ args0 -> dynamicExecutorAllocationResponseToKotlin(args0) })
            }).orElse(null)
        })

    /**
     * Whether autotune is enabled.
     */
    public val isAutotuneEnabled: Output<Boolean>?
        get() = javaResource.isAutotuneEnabled().applyValue({ args0 ->
            args0.map({ args0 ->
                args0
            }).orElse(null)
        })

    /**
     * Whether compute isolation is enabled.
     */
    public val isComputeIsolationEnabled: Output<Boolean>?
        get() = javaResource.isComputeIsolationEnabled().applyValue({ args0 ->
            args0.map({ args0 ->
                args0
            }).orElse(null)
        })

    /**
     * The time when the Big Data pool was updated successfully.
     */
    public val lastSucceededTimestamp: Output<String>
        get() = javaResource.lastSucceededTimestamp().applyValue({ args0 -> args0 })

    /**
     * Library version requirements
     */
    public val libraryRequirements: Output<LibraryRequirementsResponse>?
        get() = javaResource.libraryRequirements().applyValue({ args0 ->
            args0.map({ args0 ->
                args0.let({ args0 -> libraryRequirementsResponseToKotlin(args0) })
            }).orElse(null)
        })

    /**
     * The geo-location where the resource lives
     */
    public val location: Output<String>
        get() = javaResource.location().applyValue({ args0 -> args0 })

    /**
     * The name of the resource
     */
    public val name: Output<String>
        get() = javaResource.name().applyValue({ args0 -> args0 })

    /**
     * The number of nodes in the Big Data pool.
     */
    public val nodeCount: Output<Int>?
        get() = javaResource.nodeCount().applyValue({ args0 -> args0.map({ args0 -> args0 }).orElse(null) })

    /**
     * The level of compute power that each node in the Big Data pool has.
     */
    public val nodeSize: Output<String>?
        get() = javaResource.nodeSize().applyValue({ args0 -> args0.map({ args0 -> args0 }).orElse(null) })

    /**
     * The kind of nodes that the Big Data pool provides.
     */
    public val nodeSizeFamily: Output<String>?
        get() = javaResource.nodeSizeFamily().applyValue({ args0 ->
            args0.map({ args0 ->
                args0
            }).orElse(null)
        })

    /**
     * The state of the Big Data pool.
     */
    public val provisioningState: Output<String>?
        get() = javaResource.provisioningState().applyValue({ args0 ->
            args0.map({ args0 ->
                args0
            }).orElse(null)
        })

    /**
     * Whether session-level packages are enabled.
     */
    public val sessionLevelPackagesEnabled: Output<Boolean>?
        get() = javaResource.sessionLevelPackagesEnabled().applyValue({ args0 ->
            args0.map({ args0 ->
                args0
            }).orElse(null)
        })

    /**
     * Spark configuration file to specify additional properties
     */
    public val sparkConfigProperties: Output<SparkConfigPropertiesResponse>?
        get() = javaResource.sparkConfigProperties().applyValue({ args0 ->
            args0.map({ args0 ->
                args0.let({ args0 -> sparkConfigPropertiesResponseToKotlin(args0) })
            }).orElse(null)
        })

    /**
     * The Spark events folder
     */
    public val sparkEventsFolder: Output<String>?
        get() = javaResource.sparkEventsFolder().applyValue({ args0 ->
            args0.map({ args0 ->
                args0
            }).orElse(null)
        })

    /**
     * The Apache Spark version.
     */
    public val sparkVersion: Output<String>?
        get() = javaResource.sparkVersion().applyValue({ args0 ->
            args0.map({ args0 ->
                args0
            }).orElse(null)
        })

    /**
     * Resource tags.
     */
    public val tags: Output<Map<String, String>>?
        get() = javaResource.tags().applyValue({ args0 ->
            args0.map({ args0 ->
                args0.map({ args0 ->
                    args0.key.to(args0.value)
                }).toMap()
            }).orElse(null)
        })

    /**
     * The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
     */
    public val type: Output<String>
        get() = javaResource.type().applyValue({ args0 -> args0 })
}
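
// Consumption sketch (illustrative, not generated code): every property above is a
// Pulumi Output, so values are transformed rather than read directly. Given a
// `pool: BigDataPool`, a derived Output can be built with `applyValue`, the same
// combinator the getters above apply to the underlying Java resource:
//
//     val summary: Output<String> = pool.name.applyValue { n -> "Big Data pool: $n" }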

public object BigDataPoolMapper : ResourceMapper<BigDataPool> {
    override fun supportsMappingOfType(javaResource: Resource): Boolean =
        com.pulumi.azurenative.synapse.BigDataPool::class == javaResource::class

    override fun map(javaResource: Resource): BigDataPool = BigDataPool(
        javaResource as
            com.pulumi.azurenative.synapse.BigDataPool,
    )
}

/**
 * @see [BigDataPool].
 * @param name The _unique_ name of the resulting resource.
 * @param block Builder for [BigDataPool].
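 *
 * A minimal sketch (builder methods on `args` and `opts` are assumed to mirror
 * `BigDataPoolArgs` and `CustomResourceOptions`):
 * ```kotlin
 * val pool = bigDataPool("examplePool") {
 *     args {
 *         nodeCount(4)
 *     }
 *     opts {
 *         protect(true) // assumed resource option on CustomResourceOptionsBuilder
 *     }
 * }
 * ```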
 */
public suspend fun bigDataPool(name: String, block: suspend BigDataPoolResourceBuilder.() -> Unit): BigDataPool {
    val builder = BigDataPoolResourceBuilder()
    builder.name(name)
    block(builder)
    return builder.build()
}

/**
 * @see [BigDataPool].
 * @param name The _unique_ name of the resulting resource.
 */
public fun bigDataPool(name: String): BigDataPool {
    val builder = BigDataPoolResourceBuilder()
    builder.name(name)
    return builder.build()
}
