@file:Suppress("NAME_SHADOWING", "DEPRECATION")
package com.pulumi.gcp.biglake.kotlin
import com.pulumi.core.Output
import com.pulumi.gcp.biglake.kotlin.outputs.TableHiveOptions
import com.pulumi.gcp.biglake.kotlin.outputs.TableHiveOptions.Companion.toKotlin
import com.pulumi.kotlin.KotlinCustomResource
import com.pulumi.kotlin.PulumiTagMarker
import com.pulumi.kotlin.ResourceMapper
import com.pulumi.kotlin.options.CustomResourceOptions
import com.pulumi.kotlin.options.CustomResourceOptionsBuilder
import com.pulumi.resources.Resource
import kotlin.Boolean
import kotlin.String
import kotlin.Suppress
import kotlin.Unit
/**
* Builder for [Table].
*/
@PulumiTagMarker
public class TableResourceBuilder internal constructor() {
public var name: String? = null
public var args: TableArgs = TableArgs()
public var opts: CustomResourceOptions = CustomResourceOptions()
/**
* @param name The _unique_ name of the resulting resource.
*/
public fun name(`value`: String) {
this.name = value
}
/**
* @param block The arguments to use to populate this resource's properties.
*/
public suspend fun args(block: suspend TableArgsBuilder.() -> Unit) {
val builder = TableArgsBuilder()
block(builder)
this.args = builder.build()
}
/**
* @param block A bag of options that control this resource's behavior.
*/
public suspend fun opts(block: suspend CustomResourceOptionsBuilder.() -> Unit) {
this.opts = com.pulumi.kotlin.options.CustomResourceOptions.opts(block)
}
internal fun build(): Table {
val builtJavaResource = com.pulumi.gcp.biglake.Table(
this.name,
this.args.toJava(),
this.opts.toJava(),
)
return Table(builtJavaResource)
}
}
/**
* Represents a table.
* To get more information about Table, see:
* * [API documentation](https://cloud.google.com/bigquery/docs/reference/biglake/rest/v1/projects.locations.catalogs.databases.tables)
* * How-to Guides
 *     * [Manage open source metadata with BigLake Metastore](https://cloud.google.com/bigquery/docs/manage-open-source-metadata#create_tables)
* ## Example Usage
* ### Biglake Table
*
* ```typescript
* import * as pulumi from "@pulumi/pulumi";
* import * as gcp from "@pulumi/gcp";
* const catalog = new gcp.biglake.Catalog("catalog", {
* name: "my_catalog",
* location: "US",
* });
* const bucket = new gcp.storage.Bucket("bucket", {
* name: "my_bucket",
* location: "US",
* forceDestroy: true,
* uniformBucketLevelAccess: true,
* });
* const metadataFolder = new gcp.storage.BucketObject("metadata_folder", {
* name: "metadata/",
* content: " ",
* bucket: bucket.name,
* });
* const dataFolder = new gcp.storage.BucketObject("data_folder", {
* name: "data/",
* content: " ",
* bucket: bucket.name,
* });
* const database = new gcp.biglake.Database("database", {
* name: "my_database",
* catalog: catalog.id,
* type: "HIVE",
* hiveOptions: {
* locationUri: pulumi.interpolate`gs://${bucket.name}/${metadataFolder.name}`,
* parameters: {
* owner: "Alex",
* },
* },
* });
* const table = new gcp.biglake.Table("table", {
* name: "my_table",
* database: database.id,
* type: "HIVE",
* hiveOptions: {
* tableType: "MANAGED_TABLE",
* storageDescriptor: {
* locationUri: pulumi.interpolate`gs://${bucket.name}/${dataFolder.name}`,
* inputFormat: "org.apache.hadoop.mapred.SequenceFileInputFormat",
* outputFormat: "org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat",
* },
* parameters: {
* "spark.sql.create.version": "3.1.3",
* "spark.sql.sources.schema.numParts": "1",
* transient_lastDdlTime: "1680894197",
* "spark.sql.partitionProvider": "catalog",
* owner: "John Doe",
* "spark.sql.sources.schema.part.0": "{\"type\":\"struct\",\"fields\":[{\"name\":\"id\",\"type\":\"integer\",\"nullable\":true,\"metadata\":{}},{\"name\":\"name\",\"type\":\"string\",\"nullable\":true,\"metadata\":{}},{\"name\":\"age\",\"type\":\"integer\",\"nullable\":true,\"metadata\":{}}]}",
* "spark.sql.sources.provider": "iceberg",
* provider: "iceberg",
* },
* },
* });
* ```
* ```python
* import pulumi
* import pulumi_gcp as gcp
* catalog = gcp.biglake.Catalog("catalog",
* name="my_catalog",
* location="US")
* bucket = gcp.storage.Bucket("bucket",
* name="my_bucket",
* location="US",
* force_destroy=True,
* uniform_bucket_level_access=True)
* metadata_folder = gcp.storage.BucketObject("metadata_folder",
* name="metadata/",
* content=" ",
* bucket=bucket.name)
* data_folder = gcp.storage.BucketObject("data_folder",
* name="data/",
* content=" ",
* bucket=bucket.name)
* database = gcp.biglake.Database("database",
* name="my_database",
* catalog=catalog.id,
* type="HIVE",
* hive_options=gcp.biglake.DatabaseHiveOptionsArgs(
 * location_uri=pulumi.Output.all(bucket.name, metadata_folder.name).apply(lambda args: f"gs://{args[0]}/{args[1]}"),
* parameters={
* "owner": "Alex",
* },
* ))
* table = gcp.biglake.Table("table",
* name="my_table",
* database=database.id,
* type="HIVE",
* hive_options=gcp.biglake.TableHiveOptionsArgs(
* table_type="MANAGED_TABLE",
* storage_descriptor=gcp.biglake.TableHiveOptionsStorageDescriptorArgs(
 * location_uri=pulumi.Output.all(bucket.name, data_folder.name).apply(lambda args: f"gs://{args[0]}/{args[1]}"),
* input_format="org.apache.hadoop.mapred.SequenceFileInputFormat",
* output_format="org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat",
* ),
* parameters={
* "spark.sql.create.version": "3.1.3",
* "spark.sql.sources.schema.numParts": "1",
* "transient_lastDdlTime": "1680894197",
* "spark.sql.partitionProvider": "catalog",
* "owner": "John Doe",
* "spark.sql.sources.schema.part.0": "{\"type\":\"struct\",\"fields\":[{\"name\":\"id\",\"type\":\"integer\",\"nullable\":true,\"metadata\":{}},{\"name\":\"name\",\"type\":\"string\",\"nullable\":true,\"metadata\":{}},{\"name\":\"age\",\"type\":\"integer\",\"nullable\":true,\"metadata\":{}}]}",
* "spark.sql.sources.provider": "iceberg",
* "provider": "iceberg",
* },
* ))
* ```
* ```csharp
* using System.Collections.Generic;
* using System.Linq;
* using Pulumi;
* using Gcp = Pulumi.Gcp;
* return await Deployment.RunAsync(() =>
* {
* var catalog = new Gcp.BigLake.Catalog("catalog", new()
* {
* Name = "my_catalog",
* Location = "US",
* });
* var bucket = new Gcp.Storage.Bucket("bucket", new()
* {
* Name = "my_bucket",
* Location = "US",
* ForceDestroy = true,
* UniformBucketLevelAccess = true,
* });
* var metadataFolder = new Gcp.Storage.BucketObject("metadata_folder", new()
* {
* Name = "metadata/",
* Content = " ",
* Bucket = bucket.Name,
* });
* var dataFolder = new Gcp.Storage.BucketObject("data_folder", new()
* {
* Name = "data/",
* Content = " ",
* Bucket = bucket.Name,
* });
* var database = new Gcp.BigLake.Database("database", new()
* {
* Name = "my_database",
* Catalog = catalog.Id,
* Type = "HIVE",
* HiveOptions = new Gcp.BigLake.Inputs.DatabaseHiveOptionsArgs
* {
* LocationUri = Output.Tuple(bucket.Name, metadataFolder.Name).Apply(values =>
* {
* var bucketName = values.Item1;
* var metadataFolderName = values.Item2;
* return $"gs://{bucketName}/{metadataFolderName}";
* }),
* Parameters =
* {
* { "owner", "Alex" },
* },
* },
* });
* var table = new Gcp.BigLake.Table("table", new()
* {
* Name = "my_table",
* Database = database.Id,
* Type = "HIVE",
* HiveOptions = new Gcp.BigLake.Inputs.TableHiveOptionsArgs
* {
* TableType = "MANAGED_TABLE",
* StorageDescriptor = new Gcp.BigLake.Inputs.TableHiveOptionsStorageDescriptorArgs
* {
* LocationUri = Output.Tuple(bucket.Name, dataFolder.Name).Apply(values =>
* {
* var bucketName = values.Item1;
* var dataFolderName = values.Item2;
* return $"gs://{bucketName}/{dataFolderName}";
* }),
* InputFormat = "org.apache.hadoop.mapred.SequenceFileInputFormat",
* OutputFormat = "org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat",
* },
* Parameters =
* {
* { "spark.sql.create.version", "3.1.3" },
* { "spark.sql.sources.schema.numParts", "1" },
* { "transient_lastDdlTime", "1680894197" },
* { "spark.sql.partitionProvider", "catalog" },
* { "owner", "John Doe" },
* { "spark.sql.sources.schema.part.0", "{\"type\":\"struct\",\"fields\":[{\"name\":\"id\",\"type\":\"integer\",\"nullable\":true,\"metadata\":{}},{\"name\":\"name\",\"type\":\"string\",\"nullable\":true,\"metadata\":{}},{\"name\":\"age\",\"type\":\"integer\",\"nullable\":true,\"metadata\":{}}]}" },
* { "spark.sql.sources.provider", "iceberg" },
* { "provider", "iceberg" },
* },
* },
* });
* });
* ```
* ```go
* package main
* import (
* "fmt"
* "github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/biglake"
* "github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/storage"
* "github.com/pulumi/pulumi/sdk/v3/go/pulumi"
* )
* func main() {
* pulumi.Run(func(ctx *pulumi.Context) error {
* catalog, err := biglake.NewCatalog(ctx, "catalog", &biglake.CatalogArgs{
* Name: pulumi.String("my_catalog"),
* Location: pulumi.String("US"),
* })
* if err != nil {
* return err
* }
* bucket, err := storage.NewBucket(ctx, "bucket", &storage.BucketArgs{
* Name: pulumi.String("my_bucket"),
* Location: pulumi.String("US"),
* ForceDestroy: pulumi.Bool(true),
* UniformBucketLevelAccess: pulumi.Bool(true),
* })
* if err != nil {
* return err
* }
* metadataFolder, err := storage.NewBucketObject(ctx, "metadata_folder", &storage.BucketObjectArgs{
* Name: pulumi.String("metadata/"),
* Content: pulumi.String(" "),
* Bucket: bucket.Name,
* })
* if err != nil {
* return err
* }
* dataFolder, err := storage.NewBucketObject(ctx, "data_folder", &storage.BucketObjectArgs{
* Name: pulumi.String("data/"),
* Content: pulumi.String(" "),
* Bucket: bucket.Name,
* })
* if err != nil {
* return err
* }
* database, err := biglake.NewDatabase(ctx, "database", &biglake.DatabaseArgs{
* Name: pulumi.String("my_database"),
* Catalog: catalog.ID(),
* Type: pulumi.String("HIVE"),
* HiveOptions: &biglake.DatabaseHiveOptionsArgs{
* LocationUri: pulumi.All(bucket.Name, metadataFolder.Name).ApplyT(func(_args []interface{}) (string, error) {
* bucketName := _args[0].(string)
* metadataFolderName := _args[1].(string)
* return fmt.Sprintf("gs://%v/%v", bucketName, metadataFolderName), nil
* }).(pulumi.StringOutput),
* Parameters: pulumi.StringMap{
* "owner": pulumi.String("Alex"),
* },
* },
* })
* if err != nil {
* return err
* }
* _, err = biglake.NewTable(ctx, "table", &biglake.TableArgs{
* Name: pulumi.String("my_table"),
* Database: database.ID(),
* Type: pulumi.String("HIVE"),
* HiveOptions: &biglake.TableHiveOptionsArgs{
* TableType: pulumi.String("MANAGED_TABLE"),
* StorageDescriptor: &biglake.TableHiveOptionsStorageDescriptorArgs{
* LocationUri: pulumi.All(bucket.Name, dataFolder.Name).ApplyT(func(_args []interface{}) (string, error) {
* bucketName := _args[0].(string)
* dataFolderName := _args[1].(string)
* return fmt.Sprintf("gs://%v/%v", bucketName, dataFolderName), nil
* }).(pulumi.StringOutput),
* InputFormat: pulumi.String("org.apache.hadoop.mapred.SequenceFileInputFormat"),
* OutputFormat: pulumi.String("org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat"),
* },
* Parameters: pulumi.StringMap{
* "spark.sql.create.version": pulumi.String("3.1.3"),
* "spark.sql.sources.schema.numParts": pulumi.String("1"),
* "transient_lastDdlTime": pulumi.String("1680894197"),
* "spark.sql.partitionProvider": pulumi.String("catalog"),
* "owner": pulumi.String("John Doe"),
* "spark.sql.sources.schema.part.0": pulumi.String("{\"type\":\"struct\",\"fields\":[{\"name\":\"id\",\"type\":\"integer\",\"nullable\":true,\"metadata\":{}},{\"name\":\"name\",\"type\":\"string\",\"nullable\":true,\"metadata\":{}},{\"name\":\"age\",\"type\":\"integer\",\"nullable\":true,\"metadata\":{}}]}"),
* "spark.sql.sources.provider": pulumi.String("iceberg"),
* "provider": pulumi.String("iceberg"),
* },
* },
* })
* if err != nil {
* return err
* }
* return nil
* })
* }
* ```
* ```java
* package generated_program;
* import com.pulumi.Context;
* import com.pulumi.Pulumi;
* import com.pulumi.core.Output;
* import com.pulumi.gcp.biglake.Catalog;
* import com.pulumi.gcp.biglake.CatalogArgs;
* import com.pulumi.gcp.storage.Bucket;
* import com.pulumi.gcp.storage.BucketArgs;
* import com.pulumi.gcp.storage.BucketObject;
* import com.pulumi.gcp.storage.BucketObjectArgs;
* import com.pulumi.gcp.biglake.Database;
* import com.pulumi.gcp.biglake.DatabaseArgs;
* import com.pulumi.gcp.biglake.inputs.DatabaseHiveOptionsArgs;
* import com.pulumi.gcp.biglake.Table;
* import com.pulumi.gcp.biglake.TableArgs;
* import com.pulumi.gcp.biglake.inputs.TableHiveOptionsArgs;
* import com.pulumi.gcp.biglake.inputs.TableHiveOptionsStorageDescriptorArgs;
* import java.util.List;
* import java.util.ArrayList;
* import java.util.Map;
* import java.io.File;
* import java.nio.file.Files;
* import java.nio.file.Paths;
* public class App {
* public static void main(String[] args) {
* Pulumi.run(App::stack);
* }
* public static void stack(Context ctx) {
* var catalog = new Catalog("catalog", CatalogArgs.builder()
* .name("my_catalog")
* .location("US")
* .build());
* var bucket = new Bucket("bucket", BucketArgs.builder()
* .name("my_bucket")
* .location("US")
* .forceDestroy(true)
* .uniformBucketLevelAccess(true)
* .build());
* var metadataFolder = new BucketObject("metadataFolder", BucketObjectArgs.builder()
* .name("metadata/")
* .content(" ")
* .bucket(bucket.name())
* .build());
* var dataFolder = new BucketObject("dataFolder", BucketObjectArgs.builder()
* .name("data/")
* .content(" ")
* .bucket(bucket.name())
* .build());
* var database = new Database("database", DatabaseArgs.builder()
* .name("my_database")
* .catalog(catalog.id())
* .type("HIVE")
* .hiveOptions(DatabaseHiveOptionsArgs.builder()
* .locationUri(Output.tuple(bucket.name(), metadataFolder.name()).applyValue(values -> {
* var bucketName = values.t1;
* var metadataFolderName = values.t2;
* return String.format("gs://%s/%s", bucketName,metadataFolderName);
* }))
* .parameters(Map.of("owner", "Alex"))
* .build())
* .build());
* var table = new Table("table", TableArgs.builder()
* .name("my_table")
* .database(database.id())
* .type("HIVE")
* .hiveOptions(TableHiveOptionsArgs.builder()
* .tableType("MANAGED_TABLE")
* .storageDescriptor(TableHiveOptionsStorageDescriptorArgs.builder()
* .locationUri(Output.tuple(bucket.name(), dataFolder.name()).applyValue(values -> {
* var bucketName = values.t1;
* var dataFolderName = values.t2;
* return String.format("gs://%s/%s", bucketName,dataFolderName);
* }))
* .inputFormat("org.apache.hadoop.mapred.SequenceFileInputFormat")
* .outputFormat("org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat")
* .build())
* .parameters(Map.ofEntries(
* Map.entry("spark.sql.create.version", "3.1.3"),
* Map.entry("spark.sql.sources.schema.numParts", "1"),
* Map.entry("transient_lastDdlTime", "1680894197"),
* Map.entry("spark.sql.partitionProvider", "catalog"),
* Map.entry("owner", "John Doe"),
* Map.entry("spark.sql.sources.schema.part.0", "{\"type\":\"struct\",\"fields\":[{\"name\":\"id\",\"type\":\"integer\",\"nullable\":true,\"metadata\":{}},{\"name\":\"name\",\"type\":\"string\",\"nullable\":true,\"metadata\":{}},{\"name\":\"age\",\"type\":\"integer\",\"nullable\":true,\"metadata\":{}}]}"),
* Map.entry("spark.sql.sources.provider", "iceberg"),
* Map.entry("provider", "iceberg")
* ))
* .build())
* .build());
* }
* }
* ```
* ```yaml
* resources:
* catalog:
* type: gcp:biglake:Catalog
* properties:
* name: my_catalog
* location: US
* bucket:
* type: gcp:storage:Bucket
* properties:
* name: my_bucket
* location: US
* forceDestroy: true
* uniformBucketLevelAccess: true
* metadataFolder:
* type: gcp:storage:BucketObject
* name: metadata_folder
* properties:
* name: metadata/
* content: ' '
* bucket: ${bucket.name}
* dataFolder:
* type: gcp:storage:BucketObject
* name: data_folder
* properties:
* name: data/
* content: ' '
* bucket: ${bucket.name}
* database:
* type: gcp:biglake:Database
* properties:
* name: my_database
* catalog: ${catalog.id}
* type: HIVE
* hiveOptions:
* locationUri: gs://${bucket.name}/${metadataFolder.name}
* parameters:
* owner: Alex
* table:
* type: gcp:biglake:Table
* properties:
* name: my_table
* database: ${database.id}
* type: HIVE
* hiveOptions:
* tableType: MANAGED_TABLE
* storageDescriptor:
* locationUri: gs://${bucket.name}/${dataFolder.name}
* inputFormat: org.apache.hadoop.mapred.SequenceFileInputFormat
* outputFormat: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
* parameters:
* spark.sql.create.version: 3.1.3
* spark.sql.sources.schema.numParts: '1'
* transient_lastDdlTime: '1680894197'
* spark.sql.partitionProvider: catalog
* owner: John Doe
* spark.sql.sources.schema.part.0: '{"type":"struct","fields":[{"name":"id","type":"integer","nullable":true,"metadata":{}},{"name":"name","type":"string","nullable":true,"metadata":{}},{"name":"age","type":"integer","nullable":true,"metadata":{}}]}'
* spark.sql.sources.provider: iceberg
* provider: iceberg
* ```
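 * The Kotlin program below is a sketch of the same resources written against this SDK's
 * `table` builder. It assumes the companion `catalog` and `database` builder functions are
 * generated in this package alongside `table`, that the generated args builders expose
 * setters named after the schema fields shown above, and that resources expose an `id`
 * output as in the other SDKs; literal `gs://` URIs stand in for the bucket interpolation.
 * ```kotlin
 * import com.pulumi.gcp.biglake.kotlin.catalog
 * import com.pulumi.gcp.biglake.kotlin.database
 * import com.pulumi.gcp.biglake.kotlin.table
 *
 * // Called from within a Pulumi Kotlin program's run block (entry-point wiring omitted).
 * suspend fun biglakeTableExample() {
 *     // A BigLake catalog and database to hold the table.
 *     val myCatalog = catalog("catalog") {
 *         args {
 *             name("my_catalog")
 *             location("US")
 *         }
 *     }
 *     val myDatabase = database("database") {
 *         args {
 *             name("my_database")
 *             catalog(myCatalog.id)
 *             type("HIVE")
 *             hiveOptions {
 *                 // Placeholder URI; interpolate the bucket and folder names as in the examples above.
 *                 locationUri("gs://my_bucket/metadata/")
 *                 parameters(mapOf("owner" to "Alex"))
 *             }
 *         }
 *     }
 *     // The HIVE table itself, stored under the data/ prefix of the bucket.
 *     table("table") {
 *         args {
 *             name("my_table")
 *             database(myDatabase.id)
 *             type("HIVE")
 *             hiveOptions {
 *                 tableType("MANAGED_TABLE")
 *                 storageDescriptor {
 *                     locationUri("gs://my_bucket/data/")
 *                     inputFormat("org.apache.hadoop.mapred.SequenceFileInputFormat")
 *                     outputFormat("org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat")
 *                 }
 *                 parameters(
 *                     mapOf(
 *                         "owner" to "John Doe",
 *                         "spark.sql.sources.provider" to "iceberg",
 *                     ),
 *                 )
 *             }
 *         }
 *     }
 * }
 * ```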
*
* ## Import
 * Table can be imported using the following format:
 * * `{{database}}/tables/{{name}}`
 * When using the `pulumi import` command, Table can be imported using the format above. For example:
* ```sh
* $ pulumi import gcp:biglake/table:Table default {{database}}/tables/{{name}}
* ```
*/
public class Table internal constructor(
override val javaResource: com.pulumi.gcp.biglake.Table,
) : KotlinCustomResource(javaResource, TableMapper) {
/**
* Output only. The creation time of the table. A timestamp in RFC3339 UTC
* "Zulu" format, with nanosecond resolution and up to nine fractional
* digits. Examples: "2014-10-02T15:01:23Z" and
* "2014-10-02T15:01:23.045123456Z".
*/
public val createTime: Output<String>
get() = javaResource.createTime().applyValue({ args0 -> args0 })
/**
* The id of the parent database.
*/
public val database: Output<String>?
get() = javaResource.database().applyValue({ args0 -> args0.map({ args0 -> args0 }).orElse(null) })
/**
* Output only. The deletion time of the table. Only set after the
* table is deleted. A timestamp in RFC3339 UTC "Zulu" format, with
* nanosecond resolution and up to nine fractional digits. Examples:
* "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
*/
public val deleteTime: Output<String>
get() = javaResource.deleteTime().applyValue({ args0 -> args0 })
/**
* The checksum of a table object computed by the server based on the value
* of other fields. It may be sent on update requests to ensure the client
* has an up-to-date value before proceeding. It is only checked for update
* table operations.
*/
public val etag: Output<String>
get() = javaResource.etag().applyValue({ args0 -> args0 })
/**
* Output only. The time when this table is considered expired. Only set
* after the table is deleted. A timestamp in RFC3339 UTC "Zulu" format,
* with nanosecond resolution and up to nine fractional digits. Examples:
* "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
*/
public val expireTime: Output<String>
get() = javaResource.expireTime().applyValue({ args0 -> args0 })
/**
* Options of a Hive table.
* Structure is documented below.
*/
public val hiveOptions: Output<TableHiveOptions>?
get() = javaResource.hiveOptions().applyValue({ args0 ->
args0.map({ args0 ->
args0.let({ args0 ->
toKotlin(args0)
})
}).orElse(null)
})
/**
* Output only. The name of the Table. Format:
* projects/{project_id_or_number}/locations/{locationId}/catalogs/{catalogId}/databases/{databaseId}/tables/{tableId}
* - - -
*/
public val name: Output<String>
get() = javaResource.name().applyValue({ args0 -> args0 })
/**
 * The table type.
* Possible values are: `HIVE`.
*/
public val type: Output<String>?
get() = javaResource.type().applyValue({ args0 -> args0.map({ args0 -> args0 }).orElse(null) })
/**
* Output only. The last modification time of the table. A timestamp in
* RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine
* fractional digits. Examples: "2014-10-02T15:01:23Z" and
* "2014-10-02T15:01:23.045123456Z".
*/
public val updateTime: Output<String>
get() = javaResource.updateTime().applyValue({ args0 -> args0 })
}
public object TableMapper : ResourceMapper<Table> {
override fun supportsMappingOfType(javaResource: Resource): Boolean =
com.pulumi.gcp.biglake.Table::class == javaResource::class
override fun map(javaResource: Resource): Table = Table(
javaResource as
com.pulumi.gcp.biglake.Table,
)
}
/**
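 * Creates a [Table] resource configured by [block].
 *
 * A minimal usage sketch (assuming the generated [TableArgsBuilder] exposes setters named
 * after the schema fields, and that the referenced Database resource exposes an `id` output):
 * ```kotlin
 * val myTable = table("table") {
 *     args {
 *         name("my_table")
 *         database(myDatabase.id) // myDatabase: a previously declared biglake Database
 *         type("HIVE")
 *     }
 *     // opts { ... } can be used to set resource options such as dependsOn.
 * }
 * ```
 *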
* @see [Table].
* @param name The _unique_ name of the resulting resource.
* @param block Builder for [Table].
*/
public suspend fun table(name: String, block: suspend TableResourceBuilder.() -> Unit): Table {
val builder = TableResourceBuilder()
builder.name(name)
block(builder)
return builder.build()
}
/**
* @see [Table].
* @param name The _unique_ name of the resulting resource.
*/
public fun table(name: String): Table {
val builder = TableResourceBuilder()
builder.name(name)
return builder.build()
}