com.pulumi.azure.streamanalytics.kotlin.OutputTableArgs.kt
@file:Suppress("NAME_SHADOWING", "DEPRECATION")
package com.pulumi.azure.streamanalytics.kotlin
import com.pulumi.azure.streamanalytics.OutputTableArgs.builder
import com.pulumi.core.Output
import com.pulumi.core.Output.of
import com.pulumi.kotlin.ConvertibleToJava
import com.pulumi.kotlin.PulumiTagMarker
import kotlin.Int
import kotlin.String
import kotlin.Suppress
import kotlin.collections.List
import kotlin.jvm.JvmName
/**
* Manages a Stream Analytics Output Table.
* ## Example Usage
*
* ```typescript
* import * as pulumi from "@pulumi/pulumi";
* import * as azure from "@pulumi/azure";
*
* const exampleResourceGroup = new azure.core.ResourceGroup("example", {
*     name: "rg-example",
*     location: "West Europe",
* });
* const example = azure.streamanalytics.getJobOutput({
*     name: "example-job",
*     resourceGroupName: exampleResourceGroup.name,
* });
* const exampleAccount = new azure.storage.Account("example", {
*     name: "examplesa",
*     resourceGroupName: exampleResourceGroup.name,
*     location: exampleResourceGroup.location,
*     accountTier: "Standard",
*     accountReplicationType: "LRS",
* });
* const exampleTable = new azure.storage.Table("example", {
*     name: "exampletable",
*     storageAccountName: exampleAccount.name,
* });
* const exampleOutputTable = new azure.streamanalytics.OutputTable("example", {
*     name: "output-to-storage-table",
*     streamAnalyticsJobName: example.apply(example => example.name),
*     resourceGroupName: example.apply(example => example.resourceGroupName),
*     storageAccountName: exampleAccount.name,
*     storageAccountKey: exampleAccount.primaryAccessKey,
*     table: exampleTable.name,
*     partitionKey: "foo",
*     rowKey: "bar",
*     batchSize: 100,
* });
* ```
* ```python
* import pulumi
* import pulumi_azure as azure
*
* example_resource_group = azure.core.ResourceGroup("example",
*     name="rg-example",
*     location="West Europe")
* example = azure.streamanalytics.get_job_output(name="example-job",
*     resource_group_name=example_resource_group.name)
* example_account = azure.storage.Account("example",
*     name="examplesa",
*     resource_group_name=example_resource_group.name,
*     location=example_resource_group.location,
*     account_tier="Standard",
*     account_replication_type="LRS")
* example_table = azure.storage.Table("example",
*     name="exampletable",
*     storage_account_name=example_account.name)
* example_output_table = azure.streamanalytics.OutputTable("example",
*     name="output-to-storage-table",
*     stream_analytics_job_name=example.name,
*     resource_group_name=example.resource_group_name,
*     storage_account_name=example_account.name,
*     storage_account_key=example_account.primary_access_key,
*     table=example_table.name,
*     partition_key="foo",
*     row_key="bar",
*     batch_size=100)
* ```
* ```csharp
* using System.Collections.Generic;
* using System.Linq;
* using Pulumi;
* using Azure = Pulumi.Azure;
*
* return await Deployment.RunAsync(() =>
* {
*     var exampleResourceGroup = new Azure.Core.ResourceGroup("example", new()
*     {
*         Name = "rg-example",
*         Location = "West Europe",
*     });
*
*     var example = Azure.StreamAnalytics.GetJob.Invoke(new()
*     {
*         Name = "example-job",
*         ResourceGroupName = exampleResourceGroup.Name,
*     });
*
*     var exampleAccount = new Azure.Storage.Account("example", new()
*     {
*         Name = "examplesa",
*         ResourceGroupName = exampleResourceGroup.Name,
*         Location = exampleResourceGroup.Location,
*         AccountTier = "Standard",
*         AccountReplicationType = "LRS",
*     });
*
*     var exampleTable = new Azure.Storage.Table("example", new()
*     {
*         Name = "exampletable",
*         StorageAccountName = exampleAccount.Name,
*     });
*
*     var exampleOutputTable = new Azure.StreamAnalytics.OutputTable("example", new()
*     {
*         Name = "output-to-storage-table",
*         StreamAnalyticsJobName = example.Apply(getJobResult => getJobResult.Name),
*         ResourceGroupName = example.Apply(getJobResult => getJobResult.ResourceGroupName),
*         StorageAccountName = exampleAccount.Name,
*         StorageAccountKey = exampleAccount.PrimaryAccessKey,
*         Table = exampleTable.Name,
*         PartitionKey = "foo",
*         RowKey = "bar",
*         BatchSize = 100,
*     });
* });
* ```
* ```go
* package main
*
* import (
*     "github.com/pulumi/pulumi-azure/sdk/v5/go/azure/core"
*     "github.com/pulumi/pulumi-azure/sdk/v5/go/azure/storage"
*     "github.com/pulumi/pulumi-azure/sdk/v5/go/azure/streamanalytics"
*     "github.com/pulumi/pulumi/sdk/v3/go/pulumi"
* )
*
* func main() {
*     pulumi.Run(func(ctx *pulumi.Context) error {
*         exampleResourceGroup, err := core.NewResourceGroup(ctx, "example", &core.ResourceGroupArgs{
*             Name:     pulumi.String("rg-example"),
*             Location: pulumi.String("West Europe"),
*         })
*         if err != nil {
*             return err
*         }
*         example := streamanalytics.LookupJobOutput(ctx, streamanalytics.GetJobOutputArgs{
*             Name:              pulumi.String("example-job"),
*             ResourceGroupName: exampleResourceGroup.Name,
*         }, nil)
*         exampleAccount, err := storage.NewAccount(ctx, "example", &storage.AccountArgs{
*             Name:                   pulumi.String("examplesa"),
*             ResourceGroupName:      exampleResourceGroup.Name,
*             Location:               exampleResourceGroup.Location,
*             AccountTier:            pulumi.String("Standard"),
*             AccountReplicationType: pulumi.String("LRS"),
*         })
*         if err != nil {
*             return err
*         }
*         exampleTable, err := storage.NewTable(ctx, "example", &storage.TableArgs{
*             Name:               pulumi.String("exampletable"),
*             StorageAccountName: exampleAccount.Name,
*         })
*         if err != nil {
*             return err
*         }
*         _, err = streamanalytics.NewOutputTable(ctx, "example", &streamanalytics.OutputTableArgs{
*             Name: pulumi.String("output-to-storage-table"),
*             StreamAnalyticsJobName: example.ApplyT(func(example streamanalytics.GetJobResult) (*string, error) {
*                 return &example.Name, nil
*             }).(pulumi.StringPtrOutput),
*             ResourceGroupName: example.ApplyT(func(example streamanalytics.GetJobResult) (*string, error) {
*                 return &example.ResourceGroupName, nil
*             }).(pulumi.StringPtrOutput),
*             StorageAccountName: exampleAccount.Name,
*             StorageAccountKey:  exampleAccount.PrimaryAccessKey,
*             Table:              exampleTable.Name,
*             PartitionKey:       pulumi.String("foo"),
*             RowKey:             pulumi.String("bar"),
*             BatchSize:          pulumi.Int(100),
*         })
*         if err != nil {
*             return err
*         }
*         return nil
*     })
* }
* ```
* ```java
* package generated_program;
*
* import com.pulumi.Context;
* import com.pulumi.Pulumi;
* import com.pulumi.core.Output;
* import com.pulumi.azure.core.ResourceGroup;
* import com.pulumi.azure.core.ResourceGroupArgs;
* import com.pulumi.azure.streamanalytics.StreamanalyticsFunctions;
* import com.pulumi.azure.streamanalytics.inputs.GetJobArgs;
* import com.pulumi.azure.storage.Account;
* import com.pulumi.azure.storage.AccountArgs;
* import com.pulumi.azure.storage.Table;
* import com.pulumi.azure.storage.TableArgs;
* import com.pulumi.azure.streamanalytics.OutputTable;
* import com.pulumi.azure.streamanalytics.OutputTableArgs;
* import java.util.List;
* import java.util.ArrayList;
* import java.util.Map;
* import java.io.File;
* import java.nio.file.Files;
* import java.nio.file.Paths;
*
* public class App {
*     public static void main(String[] args) {
*         Pulumi.run(App::stack);
*     }
*
*     public static void stack(Context ctx) {
*         var exampleResourceGroup = new ResourceGroup("exampleResourceGroup", ResourceGroupArgs.builder()
*             .name("rg-example")
*             .location("West Europe")
*             .build());
*
*         final var example = StreamanalyticsFunctions.getJob(GetJobArgs.builder()
*             .name("example-job")
*             .resourceGroupName(exampleResourceGroup.name())
*             .build());
*
*         var exampleAccount = new Account("exampleAccount", AccountArgs.builder()
*             .name("examplesa")
*             .resourceGroupName(exampleResourceGroup.name())
*             .location(exampleResourceGroup.location())
*             .accountTier("Standard")
*             .accountReplicationType("LRS")
*             .build());
*
*         var exampleTable = new Table("exampleTable", TableArgs.builder()
*             .name("exampletable")
*             .storageAccountName(exampleAccount.name())
*             .build());
*
*         var exampleOutputTable = new OutputTable("exampleOutputTable", OutputTableArgs.builder()
*             .name("output-to-storage-table")
*             .streamAnalyticsJobName(example.applyValue(getJobResult -> getJobResult.name()))
*             .resourceGroupName(example.applyValue(getJobResult -> getJobResult.resourceGroupName()))
*             .storageAccountName(exampleAccount.name())
*             .storageAccountKey(exampleAccount.primaryAccessKey())
*             .table(exampleTable.name())
*             .partitionKey("foo")
*             .rowKey("bar")
*             .batchSize(100)
*             .build());
*     }
* }
* ```
* ```yaml
* resources:
*   exampleResourceGroup:
*     type: azure:core:ResourceGroup
*     name: example
*     properties:
*       name: rg-example
*       location: West Europe
*   exampleAccount:
*     type: azure:storage:Account
*     name: example
*     properties:
*       name: examplesa
*       resourceGroupName: ${exampleResourceGroup.name}
*       location: ${exampleResourceGroup.location}
*       accountTier: Standard
*       accountReplicationType: LRS
*   exampleTable:
*     type: azure:storage:Table
*     name: example
*     properties:
*       name: exampletable
*       storageAccountName: ${exampleAccount.name}
*   exampleOutputTable:
*     type: azure:streamanalytics:OutputTable
*     name: example
*     properties:
*       name: output-to-storage-table
*       streamAnalyticsJobName: ${example.name}
*       resourceGroupName: ${example.resourceGroupName}
*       storageAccountName: ${exampleAccount.name}
*       storageAccountKey: ${exampleAccount.primaryAccessKey}
*       table: ${exampleTable.name}
*       partitionKey: foo
*       rowKey: bar
*       batchSize: 100
* variables:
*   example:
*     fn::invoke:
*       Function: azure:streamanalytics:getJob
*       Arguments:
*         name: example-job
*         resourceGroupName: ${exampleResourceGroup.name}
* ```
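*
* Since this file belongs to the Kotlin SDK, a minimal Kotlin sketch is also worth showing.
* It relies only on the `OutputTableArgs` data class defined below and `Output.of`; the
* values mirror the examples above, and the storage key is a hypothetical placeholder:
* ```kotlin
* import com.pulumi.core.Output
*
* val args = OutputTableArgs(
*     name = Output.of("output-to-storage-table"),
*     streamAnalyticsJobName = Output.of("example-job"),
*     resourceGroupName = Output.of("rg-example"),
*     storageAccountName = Output.of("examplesa"),
*     storageAccountKey = Output.of("hypothetical-storage-key"), // placeholder, not a real key
*     table = Output.of("exampletable"),
*     partitionKey = Output.of("foo"),
*     rowKey = Output.of("bar"),
*     batchSize = Output.of(100),
* )
* // toJava() converts to the underlying Java args type when a resource needs it.
* val javaArgs = args.toJava()
* ```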
*
* ## Import
* A Stream Analytics Output Table can be imported using its `resource id`, e.g.
* ```sh
* $ pulumi import azure:streamanalytics/outputTable:OutputTable example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.StreamAnalytics/streamingJobs/job1/outputs/output1
* ```
* @property batchSize The number of records for a batch operation. Must be between `1` and `100`.
* @property columnsToRemoves A list of the column names to be removed from output event entities.
* @property name The name of the Stream Output. Changing this forces a new resource to be created.
* @property partitionKey The name of the output column that contains the partition key.
* @property resourceGroupName The name of the Resource Group where the Stream Analytics Job exists. Changing this forces a new resource to be created.
* @property rowKey The name of the output column that contains the row key.
* @property storageAccountKey The Access Key which should be used to connect to this Storage Account.
* @property storageAccountName The name of the Storage Account.
* @property streamAnalyticsJobName The name of the Stream Analytics Job. Changing this forces a new resource to be created.
* @property table The name of the table to which the stream should be output.
*/
public data class OutputTableArgs(
    public val batchSize: Output<Int>? = null,
    public val columnsToRemoves: Output<List<String>>? = null,
    public val name: Output<String>? = null,
    public val partitionKey: Output<String>? = null,
    public val resourceGroupName: Output<String>? = null,
    public val rowKey: Output<String>? = null,
    public val storageAccountKey: Output<String>? = null,
    public val storageAccountName: Output<String>? = null,
    public val streamAnalyticsJobName: Output<String>? = null,
    public val table: Output<String>? = null,
) : ConvertibleToJava<com.pulumi.azure.streamanalytics.OutputTableArgs> {
    override fun toJava(): com.pulumi.azure.streamanalytics.OutputTableArgs =
        com.pulumi.azure.streamanalytics.OutputTableArgs.builder()
            .batchSize(batchSize?.applyValue({ args0 -> args0 }))
            .columnsToRemoves(columnsToRemoves?.applyValue({ args0 -> args0.map({ args0 -> args0 }) }))
            .name(name?.applyValue({ args0 -> args0 }))
            .partitionKey(partitionKey?.applyValue({ args0 -> args0 }))
            .resourceGroupName(resourceGroupName?.applyValue({ args0 -> args0 }))
            .rowKey(rowKey?.applyValue({ args0 -> args0 }))
            .storageAccountKey(storageAccountKey?.applyValue({ args0 -> args0 }))
            .storageAccountName(storageAccountName?.applyValue({ args0 -> args0 }))
            .streamAnalyticsJobName(streamAnalyticsJobName?.applyValue({ args0 -> args0 }))
            .table(table?.applyValue({ args0 -> args0 })).build()
}
/**
* Builder for [OutputTableArgs].
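*
* A minimal sketch of calling the builder's suspend setters, written as a hypothetical
* extension function (in real programs the pulumi-azure-kotlin DSL instantiates this
* builder for you, since its constructor is internal):
* ```kotlin
* import com.pulumi.core.Output
*
* suspend fun OutputTableArgsBuilder.configureExample() {
*     batchSize(Output.of(100))
*     // "ExampleColumn" is a hypothetical column name.
*     columnsToRemoves(Output.of(listOf("ExampleColumn")))
* }
* ```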
*/
@PulumiTagMarker
public class OutputTableArgsBuilder internal constructor() {
    private var batchSize: Output<Int>? = null

    private var columnsToRemoves: Output<List<String>>? = null

    private var name: Output<String>? = null

    private var partitionKey: Output<String>? = null

    private var resourceGroupName: Output<String>? = null

    private var rowKey: Output<String>? = null

    private var storageAccountKey: Output<String>? = null

    private var storageAccountName: Output<String>? = null

    private var streamAnalyticsJobName: Output<String>? = null

    private var table: Output<String>? = null

    /**
     * @param value The number of records for a batch operation. Must be between `1` and `100`.
     */
    @JvmName("geccyyktxxjclgds")
    public suspend fun batchSize(`value`: Output<Int>) {
        this.batchSize = value
    }

    /**
     * @param value A list of the column names to be removed from output event entities.
     */
    @JvmName("aqgdskwqrtinwhmf")
    public suspend fun columnsToRemoves(`value`: Output<List<String>>) {
        this.columnsToRemoves = value
    }

    /**
     * @param values A list of the column names to be removed from output event entities.
     */
    @JvmName("sambofunxcmjxhtq")
    public suspend fun columnsToRemoves(vararg values: Output<String>) {
        this.columnsToRemoves = Output.all(values.asList())
    }

    /**
     * @param values A list of the column names to be removed from output event entities.
     */
    @JvmName("gekamlkuxsxvscuk")
    public suspend fun columnsToRemoves(values: List<String>) {
        this.columnsToRemoves = Output.of(values)
    }

    // The listing is truncated here: the generated source continues with analogous
    // setters for name, partitionKey, resourceGroupName, rowKey, storageAccountKey,
    // storageAccountName, streamAnalyticsJobName, and table. The build() below is
    // restored following the standard pulumi-kotlin generated-builder pattern.
    internal fun build(): OutputTableArgs = OutputTableArgs(
        batchSize = batchSize,
        columnsToRemoves = columnsToRemoves,
        name = name,
        partitionKey = partitionKey,
        resourceGroupName = resourceGroupName,
        rowKey = rowKey,
        storageAccountKey = storageAccountKey,
        storageAccountName = storageAccountName,
        streamAnalyticsJobName = streamAnalyticsJobName,
        table = table,
    )
}