@file:Suppress("NAME_SHADOWING", "DEPRECATION")
package com.pulumi.gcp.managedkafka.kotlin
import com.pulumi.core.Output
import com.pulumi.core.Output.of
import com.pulumi.gcp.managedkafka.TopicArgs.builder
import com.pulumi.kotlin.ConvertibleToJava
import com.pulumi.kotlin.PulumiTagMarker
import kotlin.Int
import kotlin.Pair
import kotlin.String
import kotlin.Suppress
import kotlin.collections.Map
import kotlin.jvm.JvmName
/**
* ## Example Usage
* ### Managedkafka Topic Basic
*
* ```typescript
* import * as pulumi from "@pulumi/pulumi";
* import * as gcp from "@pulumi/gcp";
* const project = gcp.organizations.getProject({});
* const cluster = new gcp.managedkafka.Cluster("cluster", {
* clusterId: "my-cluster",
* location: "us-central1",
* capacityConfig: {
* vcpuCount: "3",
* memoryBytes: "3221225472",
* },
* gcpConfig: {
* accessConfig: {
* networkConfigs: [{
* subnet: project.then(project => `projects/${project.number}/regions/us-central1/subnetworks/default`),
* }],
* },
* },
* });
* const example = new gcp.managedkafka.Topic("example", {
* topicId: "my-topic",
* cluster: cluster.clusterId,
* location: "us-central1",
* partitionCount: 2,
* replicationFactor: 3,
* configs: {
* "cleanup.policy": "compact",
* },
* });
* ```
* ```python
* import pulumi
* import pulumi_gcp as gcp
* project = gcp.organizations.get_project()
* cluster = gcp.managedkafka.Cluster("cluster",
* cluster_id="my-cluster",
* location="us-central1",
* capacity_config={
* "vcpu_count": "3",
* "memory_bytes": "3221225472",
* },
* gcp_config={
* "access_config": {
* "network_configs": [{
* "subnet": f"projects/{project.number}/regions/us-central1/subnetworks/default",
* }],
* },
* })
* example = gcp.managedkafka.Topic("example",
* topic_id="my-topic",
* cluster=cluster.cluster_id,
* location="us-central1",
* partition_count=2,
* replication_factor=3,
* configs={
* "cleanup.policy": "compact",
* })
* ```
* ```csharp
* using System.Collections.Generic;
* using System.Linq;
* using Pulumi;
* using Gcp = Pulumi.Gcp;
* return await Deployment.RunAsync(() =>
* {
* var project = Gcp.Organizations.GetProject.Invoke();
* var cluster = new Gcp.ManagedKafka.Cluster("cluster", new()
* {
* ClusterId = "my-cluster",
* Location = "us-central1",
* CapacityConfig = new Gcp.ManagedKafka.Inputs.ClusterCapacityConfigArgs
* {
* VcpuCount = "3",
* MemoryBytes = "3221225472",
* },
* GcpConfig = new Gcp.ManagedKafka.Inputs.ClusterGcpConfigArgs
* {
* AccessConfig = new Gcp.ManagedKafka.Inputs.ClusterGcpConfigAccessConfigArgs
* {
* NetworkConfigs = new[]
* {
* new Gcp.ManagedKafka.Inputs.ClusterGcpConfigAccessConfigNetworkConfigArgs
* {
* Subnet = $"projects/{project.Apply(getProjectResult => getProjectResult.Number)}/regions/us-central1/subnetworks/default",
* },
* },
* },
* },
* });
* var example = new Gcp.ManagedKafka.Topic("example", new()
* {
* TopicId = "my-topic",
* Cluster = cluster.ClusterId,
* Location = "us-central1",
* PartitionCount = 2,
* ReplicationFactor = 3,
* Configs =
* {
* { "cleanup.policy", "compact" },
* },
* });
* });
* ```
* ```go
* package main
* import (
* "fmt"
* "github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/managedkafka"
* "github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/organizations"
* "github.com/pulumi/pulumi/sdk/v3/go/pulumi"
* )
* func main() {
* pulumi.Run(func(ctx *pulumi.Context) error {
* project, err := organizations.LookupProject(ctx, nil, nil)
* if err != nil {
* return err
* }
* cluster, err := managedkafka.NewCluster(ctx, "cluster", &managedkafka.ClusterArgs{
* ClusterId: pulumi.String("my-cluster"),
* Location: pulumi.String("us-central1"),
* CapacityConfig: &managedkafka.ClusterCapacityConfigArgs{
* VcpuCount: pulumi.String("3"),
* MemoryBytes: pulumi.String("3221225472"),
* },
* GcpConfig: &managedkafka.ClusterGcpConfigArgs{
* AccessConfig: &managedkafka.ClusterGcpConfigAccessConfigArgs{
* NetworkConfigs: managedkafka.ClusterGcpConfigAccessConfigNetworkConfigArray{
* &managedkafka.ClusterGcpConfigAccessConfigNetworkConfigArgs{
* Subnet: pulumi.Sprintf("projects/%v/regions/us-central1/subnetworks/default", project.Number),
* },
* },
* },
* },
* })
* if err != nil {
* return err
* }
* _, err = managedkafka.NewTopic(ctx, "example", &managedkafka.TopicArgs{
* TopicId: pulumi.String("my-topic"),
* Cluster: cluster.ClusterId,
* Location: pulumi.String("us-central1"),
* PartitionCount: pulumi.Int(2),
* ReplicationFactor: pulumi.Int(3),
* Configs: pulumi.StringMap{
* "cleanup.policy": pulumi.String("compact"),
* },
* })
* if err != nil {
* return err
* }
* return nil
* })
* }
* ```
* ```java
* package generated_program;
* import com.pulumi.Context;
* import com.pulumi.Pulumi;
* import com.pulumi.core.Output;
* import com.pulumi.gcp.organizations.OrganizationsFunctions;
* import com.pulumi.gcp.organizations.inputs.GetProjectArgs;
* import com.pulumi.gcp.managedkafka.Cluster;
* import com.pulumi.gcp.managedkafka.ClusterArgs;
* import com.pulumi.gcp.managedkafka.inputs.ClusterCapacityConfigArgs;
* import com.pulumi.gcp.managedkafka.inputs.ClusterGcpConfigArgs;
* import com.pulumi.gcp.managedkafka.inputs.ClusterGcpConfigAccessConfigArgs;
* import com.pulumi.gcp.managedkafka.inputs.ClusterGcpConfigAccessConfigNetworkConfigArgs;
* import com.pulumi.gcp.managedkafka.Topic;
* import com.pulumi.gcp.managedkafka.TopicArgs;
* import java.util.List;
* import java.util.ArrayList;
* import java.util.Map;
* import java.io.File;
* import java.nio.file.Files;
* import java.nio.file.Paths;
* public class App {
* public static void main(String[] args) {
* Pulumi.run(App::stack);
* }
* public static void stack(Context ctx) {
* final var project = OrganizationsFunctions.getProject();
* var cluster = new Cluster("cluster", ClusterArgs.builder()
* .clusterId("my-cluster")
* .location("us-central1")
* .capacityConfig(ClusterCapacityConfigArgs.builder()
* .vcpuCount("3")
* .memoryBytes("3221225472")
* .build())
* .gcpConfig(ClusterGcpConfigArgs.builder()
* .accessConfig(ClusterGcpConfigAccessConfigArgs.builder()
* .networkConfigs(ClusterGcpConfigAccessConfigNetworkConfigArgs.builder()
* .subnet(String.format("projects/%s/regions/us-central1/subnetworks/default", project.applyValue(getProjectResult -> getProjectResult.number())))
* .build())
* .build())
* .build())
* .build());
* var example = new Topic("example", TopicArgs.builder()
* .topicId("my-topic")
* .cluster(cluster.clusterId())
* .location("us-central1")
* .partitionCount(2)
* .replicationFactor(3)
* .configs(Map.of("cleanup.policy", "compact"))
* .build());
* }
* }
* ```
* ```yaml
* resources:
* cluster:
* type: gcp:managedkafka:Cluster
* properties:
* clusterId: my-cluster
* location: us-central1
* capacityConfig:
* vcpuCount: 3
* memoryBytes: 3221225472
* gcpConfig:
* accessConfig:
* networkConfigs:
* - subnet: projects/${project.number}/regions/us-central1/subnetworks/default
* example:
* type: gcp:managedkafka:Topic
* properties:
* topicId: my-topic
* cluster: ${cluster.clusterId}
* location: us-central1
* partitionCount: 2
* replicationFactor: 3
* configs:
* cleanup.policy: compact
* variables:
* project:
* fn::invoke:
* Function: gcp:organizations:getProject
* Arguments: {}
* ```
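*
* For completeness, here is a minimal Kotlin sketch of the same topic using this provider's Kotlin DSL.
* The `Pulumi.run` entry point and the `topic` resource builder function are assumed from the Kotlin
* SDK's conventions; the setters inside `args` are the ones defined on [TopicArgsBuilder] in this file,
* and `"my-cluster"` is a placeholder for a cluster provisioned as in the examples above.
* ```kotlin
* import com.pulumi.gcp.managedkafka.kotlin.topic
* import com.pulumi.kotlin.Pulumi
*
* fun main() {
*     Pulumi.run { ctx ->
*         // Create a topic on an existing Managed Kafka cluster.
*         val example = topic("example") {
*             args {
*                 topicId("my-topic")
*                 cluster("my-cluster")
*                 location("us-central1")
*                 partitionCount(2)
*                 replicationFactor(3)
*                 configs("cleanup.policy" to "compact")
*             }
*         }
*     }
* }
* ```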
*
* ## Import
* Topic can be imported using any of these accepted formats:
* * `projects/{{project}}/locations/{{location}}/clusters/{{cluster}}/topics/{{topic_id}}`
* * `{{project}}/{{location}}/{{cluster}}/{{topic_id}}`
* * `{{location}}/{{cluster}}/{{topic_id}}`
* When using the `pulumi import` command, Topic can be imported using one of the formats above. For example:
* ```sh
* $ pulumi import gcp:managedkafka/topic:Topic default projects/{{project}}/locations/{{location}}/clusters/{{cluster}}/topics/{{topic_id}}
* ```
* ```sh
* $ pulumi import gcp:managedkafka/topic:Topic default {{project}}/{{location}}/{{cluster}}/{{topic_id}}
* ```
* ```sh
* $ pulumi import gcp:managedkafka/topic:Topic default {{location}}/{{cluster}}/{{topic_id}}
* ```
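*
* The same arguments can also be constructed directly through this data class, wrapping each plain
* value with `Output.of`; this is a sketch equivalent to what the type-safe builder in this file
* produces:
* ```kotlin
* import com.pulumi.core.Output
* import com.pulumi.gcp.managedkafka.kotlin.TopicArgs
*
* val args = TopicArgs(
*     topicId = Output.of("my-topic"),
*     cluster = Output.of("my-cluster"),
*     location = Output.of("us-central1"),
*     partitionCount = Output.of(2),
*     replicationFactor = Output.of(3),
*     configs = Output.of(mapOf("cleanup.policy" to "compact")),
* )
* ```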
* @property cluster The cluster name.
* @property configs Configurations for the topic that are overridden from the cluster defaults. The key of the map is a Kafka topic property name, for example: `cleanup.policy=compact`, `compression.type=producer`.
* @property location ID of the location of the Kafka resource. See https://cloud.google.com/managed-kafka/docs/locations for a list of supported locations.
* @property partitionCount The number of partitions in a topic. You can increase the partition count for a topic, but you cannot decrease it. Increasing partitions for a topic that uses a key might change how messages are distributed.
* @property project The ID of the project in which the resource belongs.
* If it is not provided, the provider project is used.
* @property replicationFactor The number of replicas of each partition. A replication factor of 3 is recommended for high availability.
* @property topicId The ID to use for the topic, which will become the final component of the topic's name. This value is structured like: `my-topic-name`.
* - - -
*/
public data class TopicArgs(
public val cluster: Output<String>? = null,
public val configs: Output<Map<String, String>>? = null,
public val location: Output<String>? = null,
public val partitionCount: Output<Int>? = null,
public val project: Output<String>? = null,
public val replicationFactor: Output<Int>? = null,
public val topicId: Output<String>? = null,
) : ConvertibleToJava<com.pulumi.gcp.managedkafka.TopicArgs> {
override fun toJava(): com.pulumi.gcp.managedkafka.TopicArgs =
com.pulumi.gcp.managedkafka.TopicArgs.builder()
.cluster(cluster?.applyValue({ args0 -> args0 }))
.configs(configs?.applyValue({ args0 -> args0.map({ args0 -> args0.key.to(args0.value) }).toMap() }))
.location(location?.applyValue({ args0 -> args0 }))
.partitionCount(partitionCount?.applyValue({ args0 -> args0 }))
.project(project?.applyValue({ args0 -> args0 }))
.replicationFactor(replicationFactor?.applyValue({ args0 -> args0 }))
.topicId(topicId?.applyValue({ args0 -> args0 })).build()
}
/**
* Builder for [TopicArgs].
*/
@PulumiTagMarker
public class TopicArgsBuilder internal constructor() {
private var cluster: Output<String>? = null
private var configs: Output<Map<String, String>>? = null
private var location: Output<String>? = null
private var partitionCount: Output<Int>? = null
private var project: Output<String>? = null
private var replicationFactor: Output<Int>? = null
private var topicId: Output<String>? = null
/**
* @param value The cluster name.
*/
@JvmName("qfxrnafsiikmkoqx")
public suspend fun cluster(`value`: Output<String>) {
this.cluster = value
}
/**
* @param value Configurations for the topic that are overridden from the cluster defaults. The key of the map is a Kafka topic property name, for example: `cleanup.policy=compact`, `compression.type=producer`.
*/
@JvmName("fbulvcmpapmclbkv")
public suspend fun configs(`value`: Output<Map<String, String>>) {
this.configs = value
}
/**
* @param value ID of the location of the Kafka resource. See https://cloud.google.com/managed-kafka/docs/locations for a list of supported locations.
*/
@JvmName("tohjuuibolpqxitw")
public suspend fun location(`value`: Output<String>) {
this.location = value
}
/**
* @param value The number of partitions in a topic. You can increase the partition count for a topic, but you cannot decrease it. Increasing partitions for a topic that uses a key might change how messages are distributed.
*/
@JvmName("iipjfibkbulecpil")
public suspend fun partitionCount(`value`: Output<Int>) {
this.partitionCount = value
}
/**
* @param value The ID of the project in which the resource belongs.
* If it is not provided, the provider project is used.
*/
@JvmName("drxybxisanlpcuoc")
public suspend fun project(`value`: Output<String>) {
this.project = value
}
/**
* @param value The number of replicas of each partition. A replication factor of 3 is recommended for high availability.
*/
@JvmName("gtpudrojlqwlfwlx")
public suspend fun replicationFactor(`value`: Output<Int>) {
this.replicationFactor = value
}
/**
* @param value The ID to use for the topic, which will become the final component of the topic's name. This value is structured like: `my-topic-name`.
* - - -
*/
@JvmName("bceajunokhngghuc")
public suspend fun topicId(`value`: Output<String>) {
this.topicId = value
}
/**
* @param value The cluster name.
*/
@JvmName("uyjlmneykjmskdty")
public suspend fun cluster(`value`: String?) {
val toBeMapped = value
val mapped = toBeMapped?.let({ args0 -> of(args0) })
this.cluster = mapped
}
/**
* @param value Configurations for the topic that are overridden from the cluster defaults. The key of the map is a Kafka topic property name, for example: `cleanup.policy=compact`, `compression.type=producer`.
*/
@JvmName("gfkblgrggtlbsdgs")
public suspend fun configs(`value`: Map<String, String>?) {
val toBeMapped = value
val mapped = toBeMapped?.let({ args0 -> of(args0) })
this.configs = mapped
}
/**
* @param values Configurations for the topic that are overridden from the cluster defaults. The key of the map is a Kafka topic property name, for example: `cleanup.policy=compact`, `compression.type=producer`.
*/
@JvmName("homwjhfjmeebadml")
public fun configs(vararg values: Pair<String, String>) {
val toBeMapped = values.toMap()
val mapped = toBeMapped.let({ args0 -> of(args0) })
this.configs = mapped
}
/**
* @param value ID of the location of the Kafka resource. See https://cloud.google.com/managed-kafka/docs/locations for a list of supported locations.
*/
@JvmName("dmxjjumflxhywgoq")
public suspend fun location(`value`: String?) {
val toBeMapped = value
val mapped = toBeMapped?.let({ args0 -> of(args0) })
this.location = mapped
}
/**
* @param value The number of partitions in a topic. You can increase the partition count for a topic, but you cannot decrease it. Increasing partitions for a topic that uses a key might change how messages are distributed.
*/
@JvmName("dasvordiitejjuau")
public suspend fun partitionCount(`value`: Int?) {
val toBeMapped = value
val mapped = toBeMapped?.let({ args0 -> of(args0) })
this.partitionCount = mapped
}
/**
* @param value The ID of the project in which the resource belongs.
* If it is not provided, the provider project is used.
*/
@JvmName("gliyhjapfvlwucoe")
public suspend fun project(`value`: String?) {
val toBeMapped = value
val mapped = toBeMapped?.let({ args0 -> of(args0) })
this.project = mapped
}
/**
* @param value The number of replicas of each partition. A replication factor of 3 is recommended for high availability.
*/
@JvmName("xfmeiuqqsfxjvkdr")
public suspend fun replicationFactor(`value`: Int?) {
val toBeMapped = value
val mapped = toBeMapped?.let({ args0 -> of(args0) })
this.replicationFactor = mapped
}
/**
* @param value The ID to use for the topic, which will become the final component of the topic's name. This value is structured like: `my-topic-name`.
* - - -
*/
@JvmName("qgblrmlfgmilncaq")
public suspend fun topicId(`value`: String?) {
val toBeMapped = value
val mapped = toBeMapped?.let({ args0 -> of(args0) })
this.topicId = mapped
}
internal fun build(): TopicArgs = TopicArgs(
cluster = cluster,
configs = configs,
location = location,
partitionCount = partitionCount,
project = project,
replicationFactor = replicationFactor,
topicId = topicId,
)
}