
com.pulumi.azure.synapse.SparkPool Maven / Gradle / Ivy


A Pulumi package for creating and managing Microsoft Azure cloud resources, based on the Terraform azurerm provider. We recommend using the [Azure Native provider](https://github.com/pulumi/pulumi-azure-native) to provision Azure infrastructure. Azure Native provides complete coverage of Azure resources and same-day access to new resources and resource updates.

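This package is distributed through the Maven repository under the `com.pulumi` group; the artifact ID `azure` below is the assumed coordinate for this package, and `6.10.0-alpha.1731737215` is the newest version listed on this page. A minimal dependency declaration might look like:

```xml
<!-- Assumed Maven coordinates for the Pulumi Azure provider package -->
<dependency>
    <groupId>com.pulumi</groupId>
    <artifactId>azure</artifactId>
    <version>6.10.0-alpha.1731737215</version>
</dependency>
```

The Gradle equivalent would be `implementation("com.pulumi:azure:6.10.0-alpha.1731737215")`.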
// *** WARNING: this file was generated by pulumi-java-gen. ***
// *** Do not edit by hand unless you're certain you know what you are doing! ***

package com.pulumi.azure.synapse;

import com.pulumi.azure.Utilities;
import com.pulumi.azure.synapse.SparkPoolArgs;
import com.pulumi.azure.synapse.inputs.SparkPoolState;
import com.pulumi.azure.synapse.outputs.SparkPoolAutoPause;
import com.pulumi.azure.synapse.outputs.SparkPoolAutoScale;
import com.pulumi.azure.synapse.outputs.SparkPoolLibraryRequirement;
import com.pulumi.azure.synapse.outputs.SparkPoolSparkConfig;
import com.pulumi.core.Output;
import com.pulumi.core.annotations.Export;
import com.pulumi.core.annotations.ResourceType;
import com.pulumi.core.internal.Codegen;
import java.lang.Boolean;
import java.lang.Integer;
import java.lang.String;
import java.util.Map;
import java.util.Optional;
import javax.annotation.Nullable;

/**
 * Manages a Synapse Spark Pool.
 * 
 * ## Example Usage
 * 
 * <!--Start PulumiCodeChooser -->
 * 
 * {@code
 * package generated_program;
 * 
 * import com.pulumi.Context;
 * import com.pulumi.Pulumi;
 * import com.pulumi.core.Output;
 * import com.pulumi.azure.core.ResourceGroup;
 * import com.pulumi.azure.core.ResourceGroupArgs;
 * import com.pulumi.azure.storage.Account;
 * import com.pulumi.azure.storage.AccountArgs;
 * import com.pulumi.azure.storage.DataLakeGen2Filesystem;
 * import com.pulumi.azure.storage.DataLakeGen2FilesystemArgs;
 * import com.pulumi.azure.synapse.Workspace;
 * import com.pulumi.azure.synapse.WorkspaceArgs;
 * import com.pulumi.azure.synapse.inputs.WorkspaceIdentityArgs;
 * import com.pulumi.azure.synapse.SparkPool;
 * import com.pulumi.azure.synapse.SparkPoolArgs;
 * import com.pulumi.azure.synapse.inputs.SparkPoolAutoScaleArgs;
 * import com.pulumi.azure.synapse.inputs.SparkPoolAutoPauseArgs;
 * import com.pulumi.azure.synapse.inputs.SparkPoolLibraryRequirementArgs;
 * import com.pulumi.azure.synapse.inputs.SparkPoolSparkConfigArgs;
 * import java.util.List;
 * import java.util.ArrayList;
 * import java.util.Map;
 * import java.io.File;
 * import java.nio.file.Files;
 * import java.nio.file.Paths;
 * 
 * public class App {
 *     public static void main(String[] args) {
 *         Pulumi.run(App::stack);
 *     }
 * 
 *     public static void stack(Context ctx) {
 *         var example = new ResourceGroup("example", ResourceGroupArgs.builder()
 *             .name("example-resources")
 *             .location("West Europe")
 *             .build());
 * 
 *         var exampleAccount = new Account("exampleAccount", AccountArgs.builder()
 *             .name("examplestorageacc")
 *             .resourceGroupName(example.name())
 *             .location(example.location())
 *             .accountTier("Standard")
 *             .accountReplicationType("LRS")
 *             .accountKind("StorageV2")
 *             .isHnsEnabled(true)
 *             .build());
 * 
 *         var exampleDataLakeGen2Filesystem = new DataLakeGen2Filesystem("exampleDataLakeGen2Filesystem", DataLakeGen2FilesystemArgs.builder()
 *             .name("example")
 *             .storageAccountId(exampleAccount.id())
 *             .build());
 * 
 *         var exampleWorkspace = new Workspace("exampleWorkspace", WorkspaceArgs.builder()
 *             .name("example")
 *             .resourceGroupName(example.name())
 *             .location(example.location())
 *             .storageDataLakeGen2FilesystemId(exampleDataLakeGen2Filesystem.id())
 *             .sqlAdministratorLogin("sqladminuser")
 *             .sqlAdministratorLoginPassword("H@Sh1CoR3!")
 *             .identity(WorkspaceIdentityArgs.builder()
 *                 .type("SystemAssigned")
 *                 .build())
 *             .build());
 * 
 *         var exampleSparkPool = new SparkPool("exampleSparkPool", SparkPoolArgs.builder()
 *             .name("example")
 *             .synapseWorkspaceId(exampleWorkspace.id())
 *             .nodeSizeFamily("MemoryOptimized")
 *             .nodeSize("Small")
 *             .cacheSize(100)
 *             .autoScale(SparkPoolAutoScaleArgs.builder()
 *                 .maxNodeCount(50)
 *                 .minNodeCount(3)
 *                 .build())
 *             .autoPause(SparkPoolAutoPauseArgs.builder()
 *                 .delayInMinutes(15)
 *                 .build())
 *             .libraryRequirement(SparkPoolLibraryRequirementArgs.builder()
 *                 .content("""
 * appnope==0.1.0
 * beautifulsoup4==4.6.3
 *                 """)
 *                 .filename("requirements.txt")
 *                 .build())
 *             .sparkConfig(SparkPoolSparkConfigArgs.builder()
 *                 .content("""
 * spark.shuffle.spill                true
 *                 """)
 *                 .filename("config.txt")
 *                 .build())
 *             .tags(Map.of("ENV", "Production"))
 *             .build());
 * 
 *     }
 * }
 * }
 * 
 * <!--End PulumiCodeChooser -->
 * 
 * ## Import
 * 
 * Synapse Spark Pool can be imported using the `resource id`, e.g.
 * 
 * ```sh
 * $ pulumi import azure:synapse/sparkPool:SparkPool example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.Synapse/workspaces/workspace1/bigDataPools/sparkPool1
 * ```
 * 
 */
@ResourceType(type="azure:synapse/sparkPool:SparkPool")
public class SparkPool extends com.pulumi.resources.CustomResource {
    /**
     * An `auto_pause` block as defined below.
     * 
     */
    @Export(name="autoPause", refs={SparkPoolAutoPause.class}, tree="[0]")
    private Output</* @Nullable */ SparkPoolAutoPause> autoPause;

    /**
     * @return An `auto_pause` block as defined below.
     * 
     */
    public Output<Optional<SparkPoolAutoPause>> autoPause() {
        return Codegen.optional(this.autoPause);
    }

    /**
     * An `auto_scale` block as defined below. Exactly one of `node_count` or `auto_scale` must be specified.
     * 
     */
    @Export(name="autoScale", refs={SparkPoolAutoScale.class}, tree="[0]")
    private Output</* @Nullable */ SparkPoolAutoScale> autoScale;

    /**
     * @return An `auto_scale` block as defined below. Exactly one of `node_count` or `auto_scale` must be specified.
     * 
     */
    public Output<Optional<SparkPoolAutoScale>> autoScale() {
        return Codegen.optional(this.autoScale);
    }

    /**
     * The cache size in the Spark Pool.
     * 
     */
    @Export(name="cacheSize", refs={Integer.class}, tree="[0]")
    private Output</* @Nullable */ Integer> cacheSize;

    /**
     * @return The cache size in the Spark Pool.
     * 
     */
    public Output<Optional<Integer>> cacheSize() {
        return Codegen.optional(this.cacheSize);
    }

    /**
     * Indicates whether compute isolation is enabled or not. Defaults to `false`.
     * 
     */
    @Export(name="computeIsolationEnabled", refs={Boolean.class}, tree="[0]")
    private Output</* @Nullable */ Boolean> computeIsolationEnabled;

    /**
     * @return Indicates whether compute isolation is enabled or not. Defaults to `false`.
     * 
     */
    public Output<Optional<Boolean>> computeIsolationEnabled() {
        return Codegen.optional(this.computeIsolationEnabled);
    }

    /**
     * Indicates whether Dynamic Executor Allocation is enabled or not. Defaults to `false`.
     * 
     */
    @Export(name="dynamicExecutorAllocationEnabled", refs={Boolean.class}, tree="[0]")
    private Output</* @Nullable */ Boolean> dynamicExecutorAllocationEnabled;

    /**
     * @return Indicates whether Dynamic Executor Allocation is enabled or not. Defaults to `false`.
     * 
     */
    public Output<Optional<Boolean>> dynamicExecutorAllocationEnabled() {
        return Codegen.optional(this.dynamicExecutorAllocationEnabled);
    }

    /**
     * A `library_requirement` block as defined below.
     * 
     */
    @Export(name="libraryRequirement", refs={SparkPoolLibraryRequirement.class}, tree="[0]")
    private Output</* @Nullable */ SparkPoolLibraryRequirement> libraryRequirement;

    /**
     * @return A `library_requirement` block as defined below.
     * 
     */
    public Output<Optional<SparkPoolLibraryRequirement>> libraryRequirement() {
        return Codegen.optional(this.libraryRequirement);
    }

    /**
     * The maximum number of executors allocated, applicable only when `dynamic_executor_allocation_enabled` is `true`.
     * 
     */
    @Export(name="maxExecutors", refs={Integer.class}, tree="[0]")
    private Output</* @Nullable */ Integer> maxExecutors;

    /**
     * @return The maximum number of executors allocated, applicable only when `dynamic_executor_allocation_enabled` is `true`.
     * 
     */
    public Output<Optional<Integer>> maxExecutors() {
        return Codegen.optional(this.maxExecutors);
    }

    /**
     * The minimum number of executors allocated, applicable only when `dynamic_executor_allocation_enabled` is `true`.
     * 
     */
    @Export(name="minExecutors", refs={Integer.class}, tree="[0]")
    private Output</* @Nullable */ Integer> minExecutors;

    /**
     * @return The minimum number of executors allocated, applicable only when `dynamic_executor_allocation_enabled` is `true`.
     * 
     */
    public Output<Optional<Integer>> minExecutors() {
        return Codegen.optional(this.minExecutors);
    }

    /**
     * The name which should be used for this Synapse Spark Pool. Changing this forces a new Synapse Spark Pool to be created.
     * 
     */
    @Export(name="name", refs={String.class}, tree="[0]")
    private Output<String> name;

    /**
     * @return The name which should be used for this Synapse Spark Pool. Changing this forces a new Synapse Spark Pool to be created.
     * 
     */
    public Output<String> name() {
        return this.name;
    }

    /**
     * The number of nodes in the Spark Pool. Exactly one of `node_count` or `auto_scale` must be specified.
     * 
     */
    @Export(name="nodeCount", refs={Integer.class}, tree="[0]")
    private Output<Integer> nodeCount;

    /**
     * @return The number of nodes in the Spark Pool. Exactly one of `node_count` or `auto_scale` must be specified.
     * 
     */
    public Output<Integer> nodeCount() {
        return this.nodeCount;
    }

    /**
     * The level of node in the Spark Pool. Possible values are `Small`, `Medium`, `Large`, `None`, `XLarge`, `XXLarge` and `XXXLarge`.
     * 
     */
    @Export(name="nodeSize", refs={String.class}, tree="[0]")
    private Output<String> nodeSize;

    /**
     * @return The level of node in the Spark Pool. Possible values are `Small`, `Medium`, `Large`, `None`, `XLarge`, `XXLarge` and `XXXLarge`.
     * 
     */
    public Output<String> nodeSize() {
        return this.nodeSize;
    }

    /**
     * The kind of nodes that the Spark Pool provides. Possible values are `HardwareAcceleratedFPGA`, `HardwareAcceleratedGPU`, `MemoryOptimized`, and `None`.
     * 
     */
    @Export(name="nodeSizeFamily", refs={String.class}, tree="[0]")
    private Output<String> nodeSizeFamily;

    /**
     * @return The kind of nodes that the Spark Pool provides. Possible values are `HardwareAcceleratedFPGA`, `HardwareAcceleratedGPU`, `MemoryOptimized`, and `None`.
     * 
     */
    public Output<String> nodeSizeFamily() {
        return this.nodeSizeFamily;
    }

    /**
     * Indicates whether session level packages are enabled or not. Defaults to `false`.
     * 
     */
    @Export(name="sessionLevelPackagesEnabled", refs={Boolean.class}, tree="[0]")
    private Output</* @Nullable */ Boolean> sessionLevelPackagesEnabled;

    /**
     * @return Indicates whether session level packages are enabled or not. Defaults to `false`.
     * 
     */
    public Output<Optional<Boolean>> sessionLevelPackagesEnabled() {
        return Codegen.optional(this.sessionLevelPackagesEnabled);
    }

    /**
     * A `spark_config` block as defined below.
     * 
     */
    @Export(name="sparkConfig", refs={SparkPoolSparkConfig.class}, tree="[0]")
    private Output</* @Nullable */ SparkPoolSparkConfig> sparkConfig;

    /**
     * @return A `spark_config` block as defined below.
     * 
     */
    public Output<Optional<SparkPoolSparkConfig>> sparkConfig() {
        return Codegen.optional(this.sparkConfig);
    }

    /**
     * The Spark events folder.
     * 
     */
    @Export(name="sparkEventsFolder", refs={String.class}, tree="[0]")
    private Output</* @Nullable */ String> sparkEventsFolder;

    /**
     * @return The Spark events folder.
     * 
     */
    public Output<Optional<String>> sparkEventsFolder() {
        return Codegen.optional(this.sparkEventsFolder);
    }

    /**
     * The default folder where Spark logs will be written.
     * 
     */
    @Export(name="sparkLogFolder", refs={String.class}, tree="[0]")
    private Output</* @Nullable */ String> sparkLogFolder;

    /**
     * @return The default folder where Spark logs will be written.
     * 
     */
    public Output<Optional<String>> sparkLogFolder() {
        return Codegen.optional(this.sparkLogFolder);
    }

    /**
     * The Apache Spark version for the Spark Pool.
     * 
     */
    @Export(name="sparkVersion", refs={String.class}, tree="[0]")
    private Output<String> sparkVersion;

    /**
     * @return The Apache Spark version for the Spark Pool.
     * 
     */
    public Output<String> sparkVersion() {
        return this.sparkVersion;
    }

    /**
     * The ID of the Synapse Workspace where the Synapse Spark Pool should exist. Changing this forces a new Synapse Spark Pool to be created.
     * 
     */
    @Export(name="synapseWorkspaceId", refs={String.class}, tree="[0]")
    private Output<String> synapseWorkspaceId;

    /**
     * @return The ID of the Synapse Workspace where the Synapse Spark Pool should exist. Changing this forces a new Synapse Spark Pool to be created.
     * 
     */
    public Output<String> synapseWorkspaceId() {
        return this.synapseWorkspaceId;
    }

    /**
     * A mapping of tags which should be assigned to the Synapse Spark Pool.
     * 
     */
    @Export(name="tags", refs={Map.class,String.class}, tree="[0,1,1]")
    private Output</* @Nullable */ Map<String,String>> tags;

    /**
     * @return A mapping of tags which should be assigned to the Synapse Spark Pool.
     * 
     */
    public Output<Optional<Map<String,String>>> tags() {
        return Codegen.optional(this.tags);
    }

    /**
     *
     * @param name The _unique_ name of the resulting resource.
     */
    public SparkPool(java.lang.String name) {
        this(name, SparkPoolArgs.Empty);
    }
    /**
     *
     * @param name The _unique_ name of the resulting resource.
     * @param args The arguments to use to populate this resource's properties.
     */
    public SparkPool(java.lang.String name, SparkPoolArgs args) {
        this(name, args, null);
    }
    /**
     *
     * @param name The _unique_ name of the resulting resource.
     * @param args The arguments to use to populate this resource's properties.
     * @param options A bag of options that control this resource's behavior.
     */
    public SparkPool(java.lang.String name, SparkPoolArgs args, @Nullable com.pulumi.resources.CustomResourceOptions options) {
        super("azure:synapse/sparkPool:SparkPool", name, makeArgs(args, options), makeResourceOptions(options, Codegen.empty()), false);
    }

    private SparkPool(java.lang.String name, Output<java.lang.String> id, @Nullable SparkPoolState state, @Nullable com.pulumi.resources.CustomResourceOptions options) {
        super("azure:synapse/sparkPool:SparkPool", name, state, makeResourceOptions(options, id), false);
    }

    private static SparkPoolArgs makeArgs(SparkPoolArgs args, @Nullable com.pulumi.resources.CustomResourceOptions options) {
        if (options != null && options.getUrn().isPresent()) {
            return null;
        }
        return args == null ? SparkPoolArgs.Empty : args;
    }

    private static com.pulumi.resources.CustomResourceOptions makeResourceOptions(@Nullable com.pulumi.resources.CustomResourceOptions options, @Nullable Output<java.lang.String> id) {
        var defaultOptions = com.pulumi.resources.CustomResourceOptions.builder()
            .version(Utilities.getVersion())
            .build();
        return com.pulumi.resources.CustomResourceOptions.merge(defaultOptions, options, id);
    }

    /**
     * Get an existing SparkPool resource's state with the given name, ID, and optional extra
     * properties used to qualify the lookup.
     *
     * @param name The _unique_ name of the resulting resource.
     * @param id The _unique_ provider ID of the resource to lookup.
     * @param state
     * @param options Optional settings to control the behavior of the CustomResource.
     */
    public static SparkPool get(java.lang.String name, Output<java.lang.String> id, @Nullable SparkPoolState state, @Nullable com.pulumi.resources.CustomResourceOptions options) {
        return new SparkPool(name, id, state, options);
    }
}
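As a usage sketch, the `get` method above can adopt a Spark Pool that already exists in Azure (for example one brought under management via `pulumi import`) without recreating it. The resource ID below is the placeholder from the Import section, and `existing-pool` and `LookupApp` are hypothetical names:

```java
package generated_program;

import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.azure.synapse.SparkPool;

public class LookupApp {
    public static void main(String[] args) {
        Pulumi.run(ctx -> {
            // Look up an existing pool by its Azure resource ID (placeholder ID from the Import section).
            var existing = SparkPool.get("existing-pool",
                Output.of("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.Synapse/workspaces/workspace1/bigDataPools/sparkPool1"),
                null, null);

            // Export a couple of the outputs defined on the class above.
            ctx.export("sparkPoolName", existing.name());
            ctx.export("sparkPoolNodeSize", existing.nodeSize());
        });
    }
}
```

Passing `null` for `state` and `options` relies on the `@Nullable` parameters in the signature above; the engine reads the live resource to populate the exported outputs.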



