// com.pulumi.gcp.biglake.kotlin.TableArgs.kt
// Build cloud applications and infrastructure by combining the safety and reliability of
// infrastructure as code with the power of the Kotlin programming language.
@file:Suppress("NAME_SHADOWING", "DEPRECATION")
package com.pulumi.gcp.biglake.kotlin
import com.pulumi.core.Output
import com.pulumi.core.Output.of
import com.pulumi.gcp.biglake.TableArgs.builder
import com.pulumi.gcp.biglake.kotlin.inputs.TableHiveOptionsArgs
import com.pulumi.gcp.biglake.kotlin.inputs.TableHiveOptionsArgsBuilder
import com.pulumi.kotlin.ConvertibleToJava
import com.pulumi.kotlin.PulumiTagMarker
import com.pulumi.kotlin.applySuspend
import kotlin.String
import kotlin.Suppress
import kotlin.Unit
import kotlin.jvm.JvmName
/**
* Represents a table.
* To get more information about Table, see:
* * [API documentation](https://cloud.google.com/bigquery/docs/reference/biglake/rest/v1/projects.locations.catalogs.databases.tables)
* * How-to Guides
* * [Manage open source metadata with BigLake Metastore](https://cloud.google.com/bigquery/docs/manage-open-source-metadata#create_tables)
* ## Example Usage
* ### Biglake Table
*
* ```typescript
* import * as pulumi from "@pulumi/pulumi";
* import * as gcp from "@pulumi/gcp";
* const catalog = new gcp.biglake.Catalog("catalog", {
* name: "my_catalog",
* location: "US",
* });
* const bucket = new gcp.storage.Bucket("bucket", {
* name: "my_bucket",
* location: "US",
* forceDestroy: true,
* uniformBucketLevelAccess: true,
* });
* const metadataFolder = new gcp.storage.BucketObject("metadata_folder", {
* name: "metadata/",
* content: " ",
* bucket: bucket.name,
* });
* const dataFolder = new gcp.storage.BucketObject("data_folder", {
* name: "data/",
* content: " ",
* bucket: bucket.name,
* });
* const database = new gcp.biglake.Database("database", {
* name: "my_database",
* catalog: catalog.id,
* type: "HIVE",
* hiveOptions: {
* locationUri: pulumi.interpolate`gs://${bucket.name}/${metadataFolder.name}`,
* parameters: {
* owner: "Alex",
* },
* },
* });
* const table = new gcp.biglake.Table("table", {
* name: "my_table",
* database: database.id,
* type: "HIVE",
* hiveOptions: {
* tableType: "MANAGED_TABLE",
* storageDescriptor: {
* locationUri: pulumi.interpolate`gs://${bucket.name}/${dataFolder.name}`,
* inputFormat: "org.apache.hadoop.mapred.SequenceFileInputFormat",
* outputFormat: "org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat",
* },
* parameters: {
* "spark.sql.create.version": "3.1.3",
* "spark.sql.sources.schema.numParts": "1",
* transient_lastDdlTime: "1680894197",
* "spark.sql.partitionProvider": "catalog",
* owner: "John Doe",
* "spark.sql.sources.schema.part.0": "{\"type\":\"struct\",\"fields\":[{\"name\":\"id\",\"type\":\"integer\",\"nullable\":true,\"metadata\":{}},{\"name\":\"name\",\"type\":\"string\",\"nullable\":true,\"metadata\":{}},{\"name\":\"age\",\"type\":\"integer\",\"nullable\":true,\"metadata\":{}}]}",
* "spark.sql.sources.provider": "iceberg",
* provider: "iceberg",
* },
* },
* });
* ```
* ```python
* import pulumi
* import pulumi_gcp as gcp
* catalog = gcp.biglake.Catalog("catalog",
* name="my_catalog",
* location="US")
* bucket = gcp.storage.Bucket("bucket",
* name="my_bucket",
* location="US",
* force_destroy=True,
* uniform_bucket_level_access=True)
* metadata_folder = gcp.storage.BucketObject("metadata_folder",
* name="metadata/",
* content=" ",
* bucket=bucket.name)
* data_folder = gcp.storage.BucketObject("data_folder",
* name="data/",
* content=" ",
* bucket=bucket.name)
* database = gcp.biglake.Database("database",
* name="my_database",
* catalog=catalog.id,
* type="HIVE",
* hive_options=gcp.biglake.DatabaseHiveOptionsArgs(
 * location_uri=pulumi.Output.all(bucket.name, metadata_folder.name).apply(lambda args: f"gs://{args[0]}/{args[1]}"),
* parameters={
* "owner": "Alex",
* },
* ))
* table = gcp.biglake.Table("table",
* name="my_table",
* database=database.id,
* type="HIVE",
* hive_options=gcp.biglake.TableHiveOptionsArgs(
* table_type="MANAGED_TABLE",
* storage_descriptor=gcp.biglake.TableHiveOptionsStorageDescriptorArgs(
 * location_uri=pulumi.Output.all(bucket.name, data_folder.name).apply(lambda args: f"gs://{args[0]}/{args[1]}"),
* input_format="org.apache.hadoop.mapred.SequenceFileInputFormat",
* output_format="org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat",
* ),
* parameters={
* "spark.sql.create.version": "3.1.3",
* "spark.sql.sources.schema.numParts": "1",
* "transient_lastDdlTime": "1680894197",
* "spark.sql.partitionProvider": "catalog",
* "owner": "John Doe",
* "spark.sql.sources.schema.part.0": "{\"type\":\"struct\",\"fields\":[{\"name\":\"id\",\"type\":\"integer\",\"nullable\":true,\"metadata\":{}},{\"name\":\"name\",\"type\":\"string\",\"nullable\":true,\"metadata\":{}},{\"name\":\"age\",\"type\":\"integer\",\"nullable\":true,\"metadata\":{}}]}",
* "spark.sql.sources.provider": "iceberg",
* "provider": "iceberg",
* },
* ))
* ```
* ```csharp
* using System.Collections.Generic;
* using System.Linq;
* using Pulumi;
* using Gcp = Pulumi.Gcp;
* return await Deployment.RunAsync(() =>
* {
* var catalog = new Gcp.BigLake.Catalog("catalog", new()
* {
* Name = "my_catalog",
* Location = "US",
* });
* var bucket = new Gcp.Storage.Bucket("bucket", new()
* {
* Name = "my_bucket",
* Location = "US",
* ForceDestroy = true,
* UniformBucketLevelAccess = true,
* });
* var metadataFolder = new Gcp.Storage.BucketObject("metadata_folder", new()
* {
* Name = "metadata/",
* Content = " ",
* Bucket = bucket.Name,
* });
* var dataFolder = new Gcp.Storage.BucketObject("data_folder", new()
* {
* Name = "data/",
* Content = " ",
* Bucket = bucket.Name,
* });
* var database = new Gcp.BigLake.Database("database", new()
* {
* Name = "my_database",
* Catalog = catalog.Id,
* Type = "HIVE",
* HiveOptions = new Gcp.BigLake.Inputs.DatabaseHiveOptionsArgs
* {
* LocationUri = Output.Tuple(bucket.Name, metadataFolder.Name).Apply(values =>
* {
* var bucketName = values.Item1;
* var metadataFolderName = values.Item2;
* return $"gs://{bucketName}/{metadataFolderName}";
* }),
* Parameters =
* {
* { "owner", "Alex" },
* },
* },
* });
* var table = new Gcp.BigLake.Table("table", new()
* {
* Name = "my_table",
* Database = database.Id,
* Type = "HIVE",
* HiveOptions = new Gcp.BigLake.Inputs.TableHiveOptionsArgs
* {
* TableType = "MANAGED_TABLE",
* StorageDescriptor = new Gcp.BigLake.Inputs.TableHiveOptionsStorageDescriptorArgs
* {
* LocationUri = Output.Tuple(bucket.Name, dataFolder.Name).Apply(values =>
* {
* var bucketName = values.Item1;
* var dataFolderName = values.Item2;
* return $"gs://{bucketName}/{dataFolderName}";
* }),
* InputFormat = "org.apache.hadoop.mapred.SequenceFileInputFormat",
* OutputFormat = "org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat",
* },
* Parameters =
* {
* { "spark.sql.create.version", "3.1.3" },
* { "spark.sql.sources.schema.numParts", "1" },
* { "transient_lastDdlTime", "1680894197" },
* { "spark.sql.partitionProvider", "catalog" },
* { "owner", "John Doe" },
* { "spark.sql.sources.schema.part.0", "{\"type\":\"struct\",\"fields\":[{\"name\":\"id\",\"type\":\"integer\",\"nullable\":true,\"metadata\":{}},{\"name\":\"name\",\"type\":\"string\",\"nullable\":true,\"metadata\":{}},{\"name\":\"age\",\"type\":\"integer\",\"nullable\":true,\"metadata\":{}}]}" },
* { "spark.sql.sources.provider", "iceberg" },
* { "provider", "iceberg" },
* },
* },
* });
* });
* ```
* ```go
* package main
* import (
* "fmt"
* "github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/biglake"
* "github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/storage"
* "github.com/pulumi/pulumi/sdk/v3/go/pulumi"
* )
* func main() {
* pulumi.Run(func(ctx *pulumi.Context) error {
* catalog, err := biglake.NewCatalog(ctx, "catalog", &biglake.CatalogArgs{
* Name: pulumi.String("my_catalog"),
* Location: pulumi.String("US"),
* })
* if err != nil {
* return err
* }
* bucket, err := storage.NewBucket(ctx, "bucket", &storage.BucketArgs{
* Name: pulumi.String("my_bucket"),
* Location: pulumi.String("US"),
* ForceDestroy: pulumi.Bool(true),
* UniformBucketLevelAccess: pulumi.Bool(true),
* })
* if err != nil {
* return err
* }
* metadataFolder, err := storage.NewBucketObject(ctx, "metadata_folder", &storage.BucketObjectArgs{
* Name: pulumi.String("metadata/"),
* Content: pulumi.String(" "),
* Bucket: bucket.Name,
* })
* if err != nil {
* return err
* }
* dataFolder, err := storage.NewBucketObject(ctx, "data_folder", &storage.BucketObjectArgs{
* Name: pulumi.String("data/"),
* Content: pulumi.String(" "),
* Bucket: bucket.Name,
* })
* if err != nil {
* return err
* }
* database, err := biglake.NewDatabase(ctx, "database", &biglake.DatabaseArgs{
* Name: pulumi.String("my_database"),
* Catalog: catalog.ID(),
* Type: pulumi.String("HIVE"),
* HiveOptions: &biglake.DatabaseHiveOptionsArgs{
* LocationUri: pulumi.All(bucket.Name, metadataFolder.Name).ApplyT(func(_args []interface{}) (string, error) {
* bucketName := _args[0].(string)
* metadataFolderName := _args[1].(string)
* return fmt.Sprintf("gs://%v/%v", bucketName, metadataFolderName), nil
* }).(pulumi.StringOutput),
* Parameters: pulumi.StringMap{
* "owner": pulumi.String("Alex"),
* },
* },
* })
* if err != nil {
* return err
* }
* _, err = biglake.NewTable(ctx, "table", &biglake.TableArgs{
* Name: pulumi.String("my_table"),
* Database: database.ID(),
* Type: pulumi.String("HIVE"),
* HiveOptions: &biglake.TableHiveOptionsArgs{
* TableType: pulumi.String("MANAGED_TABLE"),
* StorageDescriptor: &biglake.TableHiveOptionsStorageDescriptorArgs{
* LocationUri: pulumi.All(bucket.Name, dataFolder.Name).ApplyT(func(_args []interface{}) (string, error) {
* bucketName := _args[0].(string)
* dataFolderName := _args[1].(string)
* return fmt.Sprintf("gs://%v/%v", bucketName, dataFolderName), nil
* }).(pulumi.StringOutput),
* InputFormat: pulumi.String("org.apache.hadoop.mapred.SequenceFileInputFormat"),
* OutputFormat: pulumi.String("org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat"),
* },
* Parameters: pulumi.StringMap{
* "spark.sql.create.version": pulumi.String("3.1.3"),
* "spark.sql.sources.schema.numParts": pulumi.String("1"),
* "transient_lastDdlTime": pulumi.String("1680894197"),
* "spark.sql.partitionProvider": pulumi.String("catalog"),
* "owner": pulumi.String("John Doe"),
* "spark.sql.sources.schema.part.0": pulumi.String("{\"type\":\"struct\",\"fields\":[{\"name\":\"id\",\"type\":\"integer\",\"nullable\":true,\"metadata\":{}},{\"name\":\"name\",\"type\":\"string\",\"nullable\":true,\"metadata\":{}},{\"name\":\"age\",\"type\":\"integer\",\"nullable\":true,\"metadata\":{}}]}"),
* "spark.sql.sources.provider": pulumi.String("iceberg"),
* "provider": pulumi.String("iceberg"),
* },
* },
* })
* if err != nil {
* return err
* }
* return nil
* })
* }
* ```
* ```java
* package generated_program;
* import com.pulumi.Context;
* import com.pulumi.Pulumi;
* import com.pulumi.core.Output;
* import com.pulumi.gcp.biglake.Catalog;
* import com.pulumi.gcp.biglake.CatalogArgs;
* import com.pulumi.gcp.storage.Bucket;
* import com.pulumi.gcp.storage.BucketArgs;
* import com.pulumi.gcp.storage.BucketObject;
* import com.pulumi.gcp.storage.BucketObjectArgs;
* import com.pulumi.gcp.biglake.Database;
* import com.pulumi.gcp.biglake.DatabaseArgs;
* import com.pulumi.gcp.biglake.inputs.DatabaseHiveOptionsArgs;
* import com.pulumi.gcp.biglake.Table;
* import com.pulumi.gcp.biglake.TableArgs;
* import com.pulumi.gcp.biglake.inputs.TableHiveOptionsArgs;
* import com.pulumi.gcp.biglake.inputs.TableHiveOptionsStorageDescriptorArgs;
* import java.util.List;
* import java.util.ArrayList;
* import java.util.Map;
* import java.io.File;
* import java.nio.file.Files;
* import java.nio.file.Paths;
* public class App {
* public static void main(String[] args) {
* Pulumi.run(App::stack);
* }
* public static void stack(Context ctx) {
* var catalog = new Catalog("catalog", CatalogArgs.builder()
* .name("my_catalog")
* .location("US")
* .build());
* var bucket = new Bucket("bucket", BucketArgs.builder()
* .name("my_bucket")
* .location("US")
* .forceDestroy(true)
* .uniformBucketLevelAccess(true)
* .build());
* var metadataFolder = new BucketObject("metadataFolder", BucketObjectArgs.builder()
* .name("metadata/")
* .content(" ")
* .bucket(bucket.name())
* .build());
* var dataFolder = new BucketObject("dataFolder", BucketObjectArgs.builder()
* .name("data/")
* .content(" ")
* .bucket(bucket.name())
* .build());
* var database = new Database("database", DatabaseArgs.builder()
* .name("my_database")
* .catalog(catalog.id())
* .type("HIVE")
* .hiveOptions(DatabaseHiveOptionsArgs.builder()
* .locationUri(Output.tuple(bucket.name(), metadataFolder.name()).applyValue(values -> {
* var bucketName = values.t1;
* var metadataFolderName = values.t2;
 * return String.format("gs://%s/%s", bucketName, metadataFolderName);
* }))
* .parameters(Map.of("owner", "Alex"))
* .build())
* .build());
* var table = new Table("table", TableArgs.builder()
* .name("my_table")
* .database(database.id())
* .type("HIVE")
* .hiveOptions(TableHiveOptionsArgs.builder()
* .tableType("MANAGED_TABLE")
* .storageDescriptor(TableHiveOptionsStorageDescriptorArgs.builder()
* .locationUri(Output.tuple(bucket.name(), dataFolder.name()).applyValue(values -> {
* var bucketName = values.t1;
* var dataFolderName = values.t2;
 * return String.format("gs://%s/%s", bucketName, dataFolderName);
* }))
* .inputFormat("org.apache.hadoop.mapred.SequenceFileInputFormat")
* .outputFormat("org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat")
* .build())
* .parameters(Map.ofEntries(
* Map.entry("spark.sql.create.version", "3.1.3"),
* Map.entry("spark.sql.sources.schema.numParts", "1"),
* Map.entry("transient_lastDdlTime", "1680894197"),
* Map.entry("spark.sql.partitionProvider", "catalog"),
* Map.entry("owner", "John Doe"),
* Map.entry("spark.sql.sources.schema.part.0", "{\"type\":\"struct\",\"fields\":[{\"name\":\"id\",\"type\":\"integer\",\"nullable\":true,\"metadata\":{}},{\"name\":\"name\",\"type\":\"string\",\"nullable\":true,\"metadata\":{}},{\"name\":\"age\",\"type\":\"integer\",\"nullable\":true,\"metadata\":{}}]}"),
* Map.entry("spark.sql.sources.provider", "iceberg"),
* Map.entry("provider", "iceberg")
* ))
* .build())
* .build());
* }
* }
* ```
* ```yaml
* resources:
* catalog:
* type: gcp:biglake:Catalog
* properties:
* name: my_catalog
* location: US
* bucket:
* type: gcp:storage:Bucket
* properties:
* name: my_bucket
* location: US
* forceDestroy: true
* uniformBucketLevelAccess: true
* metadataFolder:
* type: gcp:storage:BucketObject
* name: metadata_folder
* properties:
* name: metadata/
* content: ' '
* bucket: ${bucket.name}
* dataFolder:
* type: gcp:storage:BucketObject
* name: data_folder
* properties:
* name: data/
* content: ' '
* bucket: ${bucket.name}
* database:
* type: gcp:biglake:Database
* properties:
* name: my_database
* catalog: ${catalog.id}
* type: HIVE
* hiveOptions:
* locationUri: gs://${bucket.name}/${metadataFolder.name}
* parameters:
* owner: Alex
* table:
* type: gcp:biglake:Table
* properties:
* name: my_table
* database: ${database.id}
* type: HIVE
* hiveOptions:
* tableType: MANAGED_TABLE
* storageDescriptor:
* locationUri: gs://${bucket.name}/${dataFolder.name}
* inputFormat: org.apache.hadoop.mapred.SequenceFileInputFormat
* outputFormat: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
* parameters:
* spark.sql.create.version: 3.1.3
* spark.sql.sources.schema.numParts: '1'
* transient_lastDdlTime: '1680894197'
* spark.sql.partitionProvider: catalog
* owner: John Doe
* spark.sql.sources.schema.part.0: '{"type":"struct","fields":[{"name":"id","type":"integer","nullable":true,"metadata":{}},{"name":"name","type":"string","nullable":true,"metadata":{}},{"name":"age","type":"integer","nullable":true,"metadata":{}}]}'
* spark.sql.sources.provider: iceberg
* provider: iceberg
* ```
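 *
 * A Kotlin version is sketched below. It mirrors the table from the examples above and
 * assumes the `Pulumi.run` entry point and the generated `table` resource function from
 * this Kotlin SDK; the parent database id is passed as an illustrative literal rather
 * than created in the same program.
 * ```kotlin
 * import com.pulumi.gcp.biglake.kotlin.table
 * import com.pulumi.kotlin.Pulumi
 *
 * fun main() {
 *     Pulumi.run { ctx ->
 *         table("table") {
 *             args {
 *                 name("my_table")
 *                 // Illustrative id of a previously created gcp.biglake.Database.
 *                 database("projects/my-project/locations/US/catalogs/my_catalog/databases/my_database")
 *                 type("HIVE")
 *                 hiveOptions {
 *                     tableType("MANAGED_TABLE")
 *                     storageDescriptor {
 *                         locationUri("gs://my_bucket/data/")
 *                         inputFormat("org.apache.hadoop.mapred.SequenceFileInputFormat")
 *                         outputFormat("org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat")
 *                     }
 *                     parameters(mapOf("owner" to "John Doe"))
 *                 }
 *             }
 *         }
 *     }
 * }
 * ```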
*
* ## Import
 * Table can be imported using the following format:
 * * `{{database}}/tables/{{name}}`
 * When using the `pulumi import` command, Table can be imported using the format above. For example:
* ```sh
* $ pulumi import gcp:biglake/table:Table default {{database}}/tables/{{name}}
* ```
* @property database The id of the parent database.
* @property hiveOptions Options of a Hive table.
* Structure is documented below.
* @property name Output only. The name of the Table. Format:
* projects/{project_id_or_number}/locations/{locationId}/catalogs/{catalogId}/databases/{databaseId}/tables/{tableId}
* - - -
 * @property type The table type.
* Possible values are: `HIVE`.
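 *
 * [TableArgs] can also be constructed directly, wrapping plain values with `Output.of`;
 * a minimal sketch (the database id is an illustrative value):
 * ```kotlin
 * val args = TableArgs(
 *     database = Output.of("projects/my-project/locations/US/catalogs/my_catalog/databases/my_database"),
 *     type = Output.of("HIVE"),
 * )
 * ```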
*/
public data class TableArgs(
    public val database: Output<String>? = null,
    public val hiveOptions: Output<TableHiveOptionsArgs>? = null,
    public val name: Output<String>? = null,
    public val type: Output<String>? = null,
) : ConvertibleToJava<com.pulumi.gcp.biglake.TableArgs> {
    /** Maps these Kotlin args onto the underlying Java SDK [com.pulumi.gcp.biglake.TableArgs] builder. */
    override fun toJava(): com.pulumi.gcp.biglake.TableArgs =
        com.pulumi.gcp.biglake.TableArgs.builder()
            .database(database?.applyValue({ args0 -> args0 }))
            .hiveOptions(hiveOptions?.applyValue({ args0 -> args0.let({ args0 -> args0.toJava() }) }))
            .name(name?.applyValue({ args0 -> args0 }))
            .type(type?.applyValue({ args0 -> args0 })).build()
}
/**
* Builder for [TableArgs].
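 *
 * Each property has overloads accepting either a plain value or an [Output]; `hiveOptions`
 * additionally accepts a nested builder block. A minimal sketch of the DSL inside the
 * `args { }` block of the generated `table` resource function (assumed from this package;
 * `database` here is a previously created `Database` resource):
 * ```kotlin
 * args {
 *     name("my_table")          // plain String overload
 *     database(database.id)     // Output<String> overload
 *     hiveOptions {             // nested TableHiveOptionsArgsBuilder block
 *         tableType("MANAGED_TABLE")
 *     }
 * }
 * ```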
*/
@PulumiTagMarker
public class TableArgsBuilder internal constructor() {
    private var database: Output<String>? = null
    private var hiveOptions: Output<TableHiveOptionsArgs>? = null
    private var name: Output<String>? = null
    private var type: Output<String>? = null
/**
* @param value The id of the parent database.
*/
@JvmName("ghobfkssqrjlghtd")
public suspend fun database(`value`: Output<String>) {
this.database = value
}
/**
* @param value Options of a Hive table.
* Structure is documented below.
*/
@JvmName("ebmdjbtnukklower")
public suspend fun hiveOptions(`value`: Output<TableHiveOptionsArgs>) {
this.hiveOptions = value
}
/**
* @param value Output only. The name of the Table. Format:
* projects/{project_id_or_number}/locations/{locationId}/catalogs/{catalogId}/databases/{databaseId}/tables/{tableId}
* - - -
*/
@JvmName("vjfygohxbjjbvtfr")
public suspend fun name(`value`: Output<String>) {
this.name = value
}
/**
 * @param value The table type.
* Possible values are: `HIVE`.
*/
@JvmName("prqhxdidrafxwyli")
public suspend fun type(`value`: Output<String>) {
this.type = value
}
/**
* @param value The id of the parent database.
*/
@JvmName("oqktkoqllynscfiu")
public suspend fun database(`value`: String?) {
val toBeMapped = value
val mapped = toBeMapped?.let({ args0 -> of(args0) })
this.database = mapped
}
/**
* @param value Options of a Hive table.
* Structure is documented below.
*/
@JvmName("hbcyjfobltecaosn")
public suspend fun hiveOptions(`value`: TableHiveOptionsArgs?) {
val toBeMapped = value
val mapped = toBeMapped?.let({ args0 -> of(args0) })
this.hiveOptions = mapped
}
/**
* @param argument Options of a Hive table.
* Structure is documented below.
*/
@JvmName("qpjnkcirmctbrcep")
public suspend fun hiveOptions(argument: suspend TableHiveOptionsArgsBuilder.() -> Unit) {
val toBeMapped = TableHiveOptionsArgsBuilder().applySuspend { argument() }.build()
val mapped = of(toBeMapped)
this.hiveOptions = mapped
}
/**
* @param value Output only. The name of the Table. Format:
* projects/{project_id_or_number}/locations/{locationId}/catalogs/{catalogId}/databases/{databaseId}/tables/{tableId}
* - - -
*/
@JvmName("mruvkxjlwrxlommr")
public suspend fun name(`value`: String?) {
val toBeMapped = value
val mapped = toBeMapped?.let({ args0 -> of(args0) })
this.name = mapped
}
/**
 * @param value The table type.
* Possible values are: `HIVE`.
*/
@JvmName("cxawadvgeqidaqty")
public suspend fun type(`value`: String?) {
val toBeMapped = value
val mapped = toBeMapped?.let({ args0 -> of(args0) })
this.type = mapped
}
internal fun build(): TableArgs = TableArgs(
database = database,
hiveOptions = hiveOptions,
name = name,
type = type,
)
}