com.pulumi.gcp.bigquery.kotlin.Routine.kt

Build cloud applications and infrastructure by combining the safety and reliability of infrastructure as code with the power of the Kotlin programming language.

@file:Suppress("NAME_SHADOWING", "DEPRECATION")

package com.pulumi.gcp.bigquery.kotlin

import com.pulumi.core.Output
import com.pulumi.gcp.bigquery.kotlin.outputs.RoutineArgument
import com.pulumi.gcp.bigquery.kotlin.outputs.RoutineRemoteFunctionOptions
import com.pulumi.gcp.bigquery.kotlin.outputs.RoutineSparkOptions
import com.pulumi.kotlin.KotlinCustomResource
import com.pulumi.kotlin.PulumiTagMarker
import com.pulumi.kotlin.ResourceMapper
import com.pulumi.kotlin.options.CustomResourceOptions
import com.pulumi.kotlin.options.CustomResourceOptionsBuilder
import com.pulumi.resources.Resource
import kotlin.Boolean
import kotlin.Int
import kotlin.String
import kotlin.Suppress
import kotlin.Unit
import kotlin.collections.List
import com.pulumi.gcp.bigquery.kotlin.outputs.RoutineArgument.Companion.toKotlin as routineArgumentToKotlin
import com.pulumi.gcp.bigquery.kotlin.outputs.RoutineRemoteFunctionOptions.Companion.toKotlin as routineRemoteFunctionOptionsToKotlin
import com.pulumi.gcp.bigquery.kotlin.outputs.RoutineSparkOptions.Companion.toKotlin as routineSparkOptionsToKotlin

/**
 * Builder for [Routine].
 */
@PulumiTagMarker
public class RoutineResourceBuilder internal constructor() {
    public var name: String? = null

    public var args: RoutineArgs = RoutineArgs()

    public var opts: CustomResourceOptions = CustomResourceOptions()

    /**
     * @param name The _unique_ name of the resulting resource.
     */
    public fun name(`value`: String) {
        this.name = value
    }

    /**
     * @param block The arguments to use to populate this resource's properties.
     */
    public suspend fun args(block: suspend RoutineArgsBuilder.() -> Unit) {
        val builder = RoutineArgsBuilder()
        block(builder)
        this.args = builder.build()
    }

    /**
     * @param block A bag of options that control this resource's behavior.
     */
    public suspend fun opts(block: suspend CustomResourceOptionsBuilder.() -> Unit) {
        this.opts = com.pulumi.kotlin.options.CustomResourceOptions.opts(block)
    }

    internal fun build(): Routine {
        val builtJavaResource = com.pulumi.gcp.bigquery.Routine(
            this.name,
            this.args.toJava(),
            this.opts.toJava(),
        )
        return Routine(builtJavaResource)
    }
}

/**
 * A user-defined function or a stored procedure that belongs to a Dataset.
 * To get more information about Routine, see:
 * * [API documentation](https://cloud.google.com/bigquery/docs/reference/rest/v2/routines)
 * * How-to Guides
 *     * [Routines Intro](https://cloud.google.com/bigquery/docs/reference/rest/v2/routines)
 * ## Example Usage
 * ### Bigquery Routine Basic
 * 
 * ```typescript
 * import * as pulumi from "@pulumi/pulumi";
 * import * as gcp from "@pulumi/gcp";
 * const test = new gcp.bigquery.Dataset("test", {datasetId: "dataset_id"});
 * const sproc = new gcp.bigquery.Routine("sproc", {
 *     datasetId: test.datasetId,
 *     routineId: "routine_id",
 *     routineType: "PROCEDURE",
 *     language: "SQL",
 *     definitionBody: "CREATE FUNCTION Add(x FLOAT64, y FLOAT64) RETURNS FLOAT64 AS (x + y);",
 * });
 * ```
 * ```python
 * import pulumi
 * import pulumi_gcp as gcp
 * test = gcp.bigquery.Dataset("test", dataset_id="dataset_id")
 * sproc = gcp.bigquery.Routine("sproc",
 *     dataset_id=test.dataset_id,
 *     routine_id="routine_id",
 *     routine_type="PROCEDURE",
 *     language="SQL",
 *     definition_body="CREATE FUNCTION Add(x FLOAT64, y FLOAT64) RETURNS FLOAT64 AS (x + y);")
 * ```
 * ```csharp
 * using System.Collections.Generic;
 * using System.Linq;
 * using Pulumi;
 * using Gcp = Pulumi.Gcp;
 * return await Deployment.RunAsync(() =>
 * {
 *     var test = new Gcp.BigQuery.Dataset("test", new()
 *     {
 *         DatasetId = "dataset_id",
 *     });
 *     var sproc = new Gcp.BigQuery.Routine("sproc", new()
 *     {
 *         DatasetId = test.DatasetId,
 *         RoutineId = "routine_id",
 *         RoutineType = "PROCEDURE",
 *         Language = "SQL",
 *         DefinitionBody = "CREATE FUNCTION Add(x FLOAT64, y FLOAT64) RETURNS FLOAT64 AS (x + y);",
 *     });
 * });
 * ```
 * ```go
 * package main
 * import (
 * 	"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/bigquery"
 * 	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
 * )
 * func main() {
 * 	pulumi.Run(func(ctx *pulumi.Context) error {
 * 		test, err := bigquery.NewDataset(ctx, "test", &bigquery.DatasetArgs{
 * 			DatasetId: pulumi.String("dataset_id"),
 * 		})
 * 		if err != nil {
 * 			return err
 * 		}
 * 		_, err = bigquery.NewRoutine(ctx, "sproc", &bigquery.RoutineArgs{
 * 			DatasetId:      test.DatasetId,
 * 			RoutineId:      pulumi.String("routine_id"),
 * 			RoutineType:    pulumi.String("PROCEDURE"),
 * 			Language:       pulumi.String("SQL"),
 * 			DefinitionBody: pulumi.String("CREATE FUNCTION Add(x FLOAT64, y FLOAT64) RETURNS FLOAT64 AS (x + y);"),
 * 		})
 * 		if err != nil {
 * 			return err
 * 		}
 * 		return nil
 * 	})
 * }
 * ```
 * ```java
 * package generated_program;
 * import com.pulumi.Context;
 * import com.pulumi.Pulumi;
 * import com.pulumi.core.Output;
 * import com.pulumi.gcp.bigquery.Dataset;
 * import com.pulumi.gcp.bigquery.DatasetArgs;
 * import com.pulumi.gcp.bigquery.Routine;
 * import com.pulumi.gcp.bigquery.RoutineArgs;
 * import java.util.List;
 * import java.util.ArrayList;
 * import java.util.Map;
 * import java.io.File;
 * import java.nio.file.Files;
 * import java.nio.file.Paths;
 * public class App {
 *     public static void main(String[] args) {
 *         Pulumi.run(App::stack);
 *     }
 *     public static void stack(Context ctx) {
 *         var test = new Dataset("test", DatasetArgs.builder()
 *             .datasetId("dataset_id")
 *             .build());
 *         var sproc = new Routine("sproc", RoutineArgs.builder()
 *             .datasetId(test.datasetId())
 *             .routineId("routine_id")
 *             .routineType("PROCEDURE")
 *             .language("SQL")
 *             .definitionBody("CREATE FUNCTION Add(x FLOAT64, y FLOAT64) RETURNS FLOAT64 AS (x + y);")
 *             .build());
 *     }
 * }
 * ```
 * ```yaml
 * resources:
 *   test:
 *     type: gcp:bigquery:Dataset
 *     properties:
 *       datasetId: dataset_id
 *   sproc:
 *     type: gcp:bigquery:Routine
 *     properties:
 *       datasetId: ${test.datasetId}
 *       routineId: routine_id
 *       routineType: PROCEDURE
 *       language: SQL
 *       definitionBody: CREATE FUNCTION Add(x FLOAT64, y FLOAT64) RETURNS FLOAT64 AS (x + y);
 * ```
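 *
 * The same program can be written with this Kotlin SDK. The snippet below is a sketch rather than
 * generated documentation: it assumes the top-level `dataset` and `routine` DSL functions that accompany
 * this provider's resource builders, the `Pulumi.run` entry point from `com.pulumi.kotlin`, and argument
 * setters named after the resource properties.
 * ```kotlin
 * import com.pulumi.gcp.bigquery.kotlin.dataset
 * import com.pulumi.gcp.bigquery.kotlin.routine
 * import com.pulumi.kotlin.Pulumi
 *
 * fun main() {
 *     // Sketch only: the `dataset`/`routine` DSL wrappers and `Pulumi.run` are assumed, not confirmed here.
 *     Pulumi.run {
 *         // Dataset that will contain the routine.
 *         val test = dataset("test") {
 *             args {
 *                 datasetId("dataset_id")
 *             }
 *         }
 *         // SQL stored procedure, mirroring the "Bigquery Routine Basic" example above.
 *         routine("sproc") {
 *             args {
 *                 datasetId(test.datasetId)
 *                 routineId("routine_id")
 *                 routineType("PROCEDURE")
 *                 language("SQL")
 *                 definitionBody("CREATE FUNCTION Add(x FLOAT64, y FLOAT64) RETURNS FLOAT64 AS (x + y);")
 *             }
 *         }
 *     }
 * }
 * ```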
 * 
 * ### Bigquery Routine Json
 * 
 * ```typescript
 * import * as pulumi from "@pulumi/pulumi";
 * import * as gcp from "@pulumi/gcp";
 * const test = new gcp.bigquery.Dataset("test", {datasetId: "dataset_id"});
 * const sproc = new gcp.bigquery.Routine("sproc", {
 *     datasetId: test.datasetId,
 *     routineId: "routine_id",
 *     routineType: "SCALAR_FUNCTION",
 *     language: "JAVASCRIPT",
 *     definitionBody: "CREATE FUNCTION multiplyInputs return x*y;",
 *     arguments: [
 *         {
 *             name: "x",
 *             dataType: "{\"typeKind\" :  \"FLOAT64\"}",
 *         },
 *         {
 *             name: "y",
 *             dataType: "{\"typeKind\" :  \"FLOAT64\"}",
 *         },
 *     ],
 *     returnType: "{\"typeKind\" :  \"FLOAT64\"}",
 * });
 * ```
 * ```python
 * import pulumi
 * import pulumi_gcp as gcp
 * test = gcp.bigquery.Dataset("test", dataset_id="dataset_id")
 * sproc = gcp.bigquery.Routine("sproc",
 *     dataset_id=test.dataset_id,
 *     routine_id="routine_id",
 *     routine_type="SCALAR_FUNCTION",
 *     language="JAVASCRIPT",
 *     definition_body="CREATE FUNCTION multiplyInputs return x*y;",
 *     arguments=[
 *         gcp.bigquery.RoutineArgumentArgs(
 *             name="x",
 *             data_type="{\"typeKind\" :  \"FLOAT64\"}",
 *         ),
 *         gcp.bigquery.RoutineArgumentArgs(
 *             name="y",
 *             data_type="{\"typeKind\" :  \"FLOAT64\"}",
 *         ),
 *     ],
 *     return_type="{\"typeKind\" :  \"FLOAT64\"}")
 * ```
 * ```csharp
 * using System.Collections.Generic;
 * using System.Linq;
 * using Pulumi;
 * using Gcp = Pulumi.Gcp;
 * return await Deployment.RunAsync(() =>
 * {
 *     var test = new Gcp.BigQuery.Dataset("test", new()
 *     {
 *         DatasetId = "dataset_id",
 *     });
 *     var sproc = new Gcp.BigQuery.Routine("sproc", new()
 *     {
 *         DatasetId = test.DatasetId,
 *         RoutineId = "routine_id",
 *         RoutineType = "SCALAR_FUNCTION",
 *         Language = "JAVASCRIPT",
 *         DefinitionBody = "CREATE FUNCTION multiplyInputs return x*y;",
 *         Arguments = new[]
 *         {
 *             new Gcp.BigQuery.Inputs.RoutineArgumentArgs
 *             {
 *                 Name = "x",
 *                 DataType = "{\"typeKind\" :  \"FLOAT64\"}",
 *             },
 *             new Gcp.BigQuery.Inputs.RoutineArgumentArgs
 *             {
 *                 Name = "y",
 *                 DataType = "{\"typeKind\" :  \"FLOAT64\"}",
 *             },
 *         },
 *         ReturnType = "{\"typeKind\" :  \"FLOAT64\"}",
 *     });
 * });
 * ```
 * ```go
 * package main
 * import (
 * 	"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/bigquery"
 * 	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
 * )
 * func main() {
 * 	pulumi.Run(func(ctx *pulumi.Context) error {
 * 		test, err := bigquery.NewDataset(ctx, "test", &bigquery.DatasetArgs{
 * 			DatasetId: pulumi.String("dataset_id"),
 * 		})
 * 		if err != nil {
 * 			return err
 * 		}
 * 		_, err = bigquery.NewRoutine(ctx, "sproc", &bigquery.RoutineArgs{
 * 			DatasetId:      test.DatasetId,
 * 			RoutineId:      pulumi.String("routine_id"),
 * 			RoutineType:    pulumi.String("SCALAR_FUNCTION"),
 * 			Language:       pulumi.String("JAVASCRIPT"),
 * 			DefinitionBody: pulumi.String("CREATE FUNCTION multiplyInputs return x*y;"),
 * 			Arguments: bigquery.RoutineArgumentArray{
 * 				&bigquery.RoutineArgumentArgs{
 * 					Name:     pulumi.String("x"),
 * 					DataType: pulumi.String("{\"typeKind\" :  \"FLOAT64\"}"),
 * 				},
 * 				&bigquery.RoutineArgumentArgs{
 * 					Name:     pulumi.String("y"),
 * 					DataType: pulumi.String("{\"typeKind\" :  \"FLOAT64\"}"),
 * 				},
 * 			},
 * 			ReturnType: pulumi.String("{\"typeKind\" :  \"FLOAT64\"}"),
 * 		})
 * 		if err != nil {
 * 			return err
 * 		}
 * 		return nil
 * 	})
 * }
 * ```
 * ```java
 * package generated_program;
 * import com.pulumi.Context;
 * import com.pulumi.Pulumi;
 * import com.pulumi.core.Output;
 * import com.pulumi.gcp.bigquery.Dataset;
 * import com.pulumi.gcp.bigquery.DatasetArgs;
 * import com.pulumi.gcp.bigquery.Routine;
 * import com.pulumi.gcp.bigquery.RoutineArgs;
 * import com.pulumi.gcp.bigquery.inputs.RoutineArgumentArgs;
 * import java.util.List;
 * import java.util.ArrayList;
 * import java.util.Map;
 * import java.io.File;
 * import java.nio.file.Files;
 * import java.nio.file.Paths;
 * public class App {
 *     public static void main(String[] args) {
 *         Pulumi.run(App::stack);
 *     }
 *     public static void stack(Context ctx) {
 *         var test = new Dataset("test", DatasetArgs.builder()
 *             .datasetId("dataset_id")
 *             .build());
 *         var sproc = new Routine("sproc", RoutineArgs.builder()
 *             .datasetId(test.datasetId())
 *             .routineId("routine_id")
 *             .routineType("SCALAR_FUNCTION")
 *             .language("JAVASCRIPT")
 *             .definitionBody("CREATE FUNCTION multiplyInputs return x*y;")
 *             .arguments(
 *                 RoutineArgumentArgs.builder()
 *                     .name("x")
 *                     .dataType("{\"typeKind\" :  \"FLOAT64\"}")
 *                     .build(),
 *                 RoutineArgumentArgs.builder()
 *                     .name("y")
 *                     .dataType("{\"typeKind\" :  \"FLOAT64\"}")
 *                     .build())
 *             .returnType("{\"typeKind\" :  \"FLOAT64\"}")
 *             .build());
 *     }
 * }
 * ```
 * ```yaml
 * resources:
 *   test:
 *     type: gcp:bigquery:Dataset
 *     properties:
 *       datasetId: dataset_id
 *   sproc:
 *     type: gcp:bigquery:Routine
 *     properties:
 *       datasetId: ${test.datasetId}
 *       routineId: routine_id
 *       routineType: SCALAR_FUNCTION
 *       language: JAVASCRIPT
 *       definitionBody: CREATE FUNCTION multiplyInputs return x*y;
 *       arguments:
 *         - name: x
 *           dataType: '{"typeKind" :  "FLOAT64"}'
 *         - name: y
 *           dataType: '{"typeKind" :  "FLOAT64"}'
 *       returnType: '{"typeKind" :  "FLOAT64"}'
 * ```
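 *
 * A Kotlin sketch of the same JavaScript function, under the same assumptions as the basic Kotlin example
 * above; the nested `arguments` builder-block overload is also assumed.
 * ```kotlin
 * import com.pulumi.gcp.bigquery.kotlin.dataset
 * import com.pulumi.gcp.bigquery.kotlin.routine
 * import com.pulumi.kotlin.Pulumi
 *
 * fun main() {
 *     Pulumi.run {
 *         val test = dataset("test") {
 *             args {
 *                 datasetId("dataset_id")
 *             }
 *         }
 *         routine("sproc") {
 *             args {
 *                 datasetId(test.datasetId)
 *                 routineId("routine_id")
 *                 routineType("SCALAR_FUNCTION")
 *                 language("JAVASCRIPT")
 *                 definitionBody("CREATE FUNCTION multiplyInputs return x*y;")
 *                 // Each argument is declared through a nested RoutineArgumentArgs builder block (assumed overload).
 *                 arguments(
 *                     {
 *                         name("x")
 *                         dataType("{\"typeKind\" :  \"FLOAT64\"}")
 *                     },
 *                     {
 *                         name("y")
 *                         dataType("{\"typeKind\" :  \"FLOAT64\"}")
 *                     }
 *                 )
 *                 returnType("{\"typeKind\" :  \"FLOAT64\"}")
 *             }
 *         }
 *     }
 * }
 * ```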
 * 
 * ### Bigquery Routine Tvf
 * 
 * ```typescript
 * import * as pulumi from "@pulumi/pulumi";
 * import * as gcp from "@pulumi/gcp";
 * const test = new gcp.bigquery.Dataset("test", {datasetId: "dataset_id"});
 * const sproc = new gcp.bigquery.Routine("sproc", {
 *     datasetId: test.datasetId,
 *     routineId: "routine_id",
 *     routineType: "TABLE_VALUED_FUNCTION",
 *     language: "SQL",
 *     definitionBody: "SELECT 1 + value AS value\n",
 *     arguments: [{
 *         name: "value",
 *         argumentKind: "FIXED_TYPE",
 *         dataType: JSON.stringify({
 *             typeKind: "INT64",
 *         }),
 *     }],
 *     returnTableType: JSON.stringify({
 *         columns: [{
 *             name: "value",
 *             type: {
 *                 typeKind: "INT64",
 *             },
 *         }],
 *     }),
 * });
 * ```
 * ```python
 * import pulumi
 * import json
 * import pulumi_gcp as gcp
 * test = gcp.bigquery.Dataset("test", dataset_id="dataset_id")
 * sproc = gcp.bigquery.Routine("sproc",
 *     dataset_id=test.dataset_id,
 *     routine_id="routine_id",
 *     routine_type="TABLE_VALUED_FUNCTION",
 *     language="SQL",
 *     definition_body="SELECT 1 + value AS value\n",
 *     arguments=[gcp.bigquery.RoutineArgumentArgs(
 *         name="value",
 *         argument_kind="FIXED_TYPE",
 *         data_type=json.dumps({
 *             "typeKind": "INT64",
 *         }),
 *     )],
 *     return_table_type=json.dumps({
 *         "columns": [{
 *             "name": "value",
 *             "type": {
 *                 "typeKind": "INT64",
 *             },
 *         }],
 *     }))
 * ```
 * ```csharp
 * using System.Collections.Generic;
 * using System.Linq;
 * using System.Text.Json;
 * using Pulumi;
 * using Gcp = Pulumi.Gcp;
 * return await Deployment.RunAsync(() =>
 * {
 *     var test = new Gcp.BigQuery.Dataset("test", new()
 *     {
 *         DatasetId = "dataset_id",
 *     });
 *     var sproc = new Gcp.BigQuery.Routine("sproc", new()
 *     {
 *         DatasetId = test.DatasetId,
 *         RoutineId = "routine_id",
 *         RoutineType = "TABLE_VALUED_FUNCTION",
 *         Language = "SQL",
 *         DefinitionBody = @"SELECT 1 + value AS value
 * ",
 *         Arguments = new[]
 *         {
 *             new Gcp.BigQuery.Inputs.RoutineArgumentArgs
 *             {
 *                 Name = "value",
 *                 ArgumentKind = "FIXED_TYPE",
 *                 DataType = JsonSerializer.Serialize(new Dictionary<string, object?>
 *                 {
 *                     ["typeKind"] = "INT64",
 *                 }),
 *             },
 *         },
 *         ReturnTableType = JsonSerializer.Serialize(new Dictionary<string, object?>
 *         {
 *             ["columns"] = new[]
 *             {
 *                 new Dictionary<string, object?>
 *                 {
 *                     ["name"] = "value",
 *                     ["type"] = new Dictionary<string, object?>
 *                     {
 *                         ["typeKind"] = "INT64",
 *                     },
 *                 },
 *             },
 *         }),
 *     });
 * });
 * ```
 * ```go
 * package main
 * import (
 * 	"encoding/json"
 * 	"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/bigquery"
 * 	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
 * )
 * func main() {
 * 	pulumi.Run(func(ctx *pulumi.Context) error {
 * 		test, err := bigquery.NewDataset(ctx, "test", &bigquery.DatasetArgs{
 * 			DatasetId: pulumi.String("dataset_id"),
 * 		})
 * 		if err != nil {
 * 			return err
 * 		}
 * 		tmpJSON0, err := json.Marshal(map[string]interface{}{
 * 			"typeKind": "INT64",
 * 		})
 * 		if err != nil {
 * 			return err
 * 		}
 * 		json0 := string(tmpJSON0)
 * 		tmpJSON1, err := json.Marshal(map[string]interface{}{
 * 			"columns": []map[string]interface{}{
 * 				map[string]interface{}{
 * 					"name": "value",
 * 					"type": map[string]interface{}{
 * 						"typeKind": "INT64",
 * 					},
 * 				},
 * 			},
 * 		})
 * 		if err != nil {
 * 			return err
 * 		}
 * 		json1 := string(tmpJSON1)
 * 		_, err = bigquery.NewRoutine(ctx, "sproc", &bigquery.RoutineArgs{
 * 			DatasetId:      test.DatasetId,
 * 			RoutineId:      pulumi.String("routine_id"),
 * 			RoutineType:    pulumi.String("TABLE_VALUED_FUNCTION"),
 * 			Language:       pulumi.String("SQL"),
 * 			DefinitionBody: pulumi.String("SELECT 1 + value AS value\n"),
 * 			Arguments: bigquery.RoutineArgumentArray{
 * 				&bigquery.RoutineArgumentArgs{
 * 					Name:         pulumi.String("value"),
 * 					ArgumentKind: pulumi.String("FIXED_TYPE"),
 * 					DataType:     pulumi.String(json0),
 * 				},
 * 			},
 * 			ReturnTableType: pulumi.String(json1),
 * 		})
 * 		if err != nil {
 * 			return err
 * 		}
 * 		return nil
 * 	})
 * }
 * ```
 * ```java
 * package generated_program;
 * import com.pulumi.Context;
 * import com.pulumi.Pulumi;
 * import com.pulumi.core.Output;
 * import com.pulumi.gcp.bigquery.Dataset;
 * import com.pulumi.gcp.bigquery.DatasetArgs;
 * import com.pulumi.gcp.bigquery.Routine;
 * import com.pulumi.gcp.bigquery.RoutineArgs;
 * import com.pulumi.gcp.bigquery.inputs.RoutineArgumentArgs;
 * import static com.pulumi.codegen.internal.Serialization.*;
 * import java.util.List;
 * import java.util.ArrayList;
 * import java.util.Map;
 * import java.io.File;
 * import java.nio.file.Files;
 * import java.nio.file.Paths;
 * public class App {
 *     public static void main(String[] args) {
 *         Pulumi.run(App::stack);
 *     }
 *     public static void stack(Context ctx) {
 *         var test = new Dataset("test", DatasetArgs.builder()
 *             .datasetId("dataset_id")
 *             .build());
 *         var sproc = new Routine("sproc", RoutineArgs.builder()
 *             .datasetId(test.datasetId())
 *             .routineId("routine_id")
 *             .routineType("TABLE_VALUED_FUNCTION")
 *             .language("SQL")
 *             .definitionBody("""
 * SELECT 1 + value AS value
 *             """)
 *             .arguments(RoutineArgumentArgs.builder()
 *                 .name("value")
 *                 .argumentKind("FIXED_TYPE")
 *                 .dataType(serializeJson(
 *                     jsonObject(
 *                         jsonProperty("typeKind", "INT64")
 *                     )))
 *                 .build())
 *             .returnTableType(serializeJson(
 *                 jsonObject(
 *                     jsonProperty("columns", jsonArray(jsonObject(
 *                         jsonProperty("name", "value"),
 *                         jsonProperty("type", jsonObject(
 *                             jsonProperty("typeKind", "INT64")
 *                         ))
 *                     )))
 *                 )))
 *             .build());
 *     }
 * }
 * ```
 * ```yaml
 * resources:
 *   test:
 *     type: gcp:bigquery:Dataset
 *     properties:
 *       datasetId: dataset_id
 *   sproc:
 *     type: gcp:bigquery:Routine
 *     properties:
 *       datasetId: ${test.datasetId}
 *       routineId: routine_id
 *       routineType: TABLE_VALUED_FUNCTION
 *       language: SQL
 *       definitionBody: |
 *         SELECT 1 + value AS value
 *       arguments:
 *         - name: value
 *           argumentKind: FIXED_TYPE
 *           dataType:
 *             fn::toJSON:
 *               typeKind: INT64
 *       returnTableType:
 *         fn::toJSON:
 *           columns:
 *             - name: value
 *               type:
 *                 typeKind: INT64
 * ```
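 *
 * A Kotlin sketch of the same table-valued function, with the JSON type descriptors written as plain string
 * literals instead of a JSON helper (same assumptions as the Kotlin examples above).
 * ```kotlin
 * import com.pulumi.gcp.bigquery.kotlin.dataset
 * import com.pulumi.gcp.bigquery.kotlin.routine
 * import com.pulumi.kotlin.Pulumi
 *
 * fun main() {
 *     Pulumi.run {
 *         val test = dataset("test") {
 *             args {
 *                 datasetId("dataset_id")
 *             }
 *         }
 *         routine("sproc") {
 *             args {
 *                 datasetId(test.datasetId)
 *                 routineId("routine_id")
 *                 routineType("TABLE_VALUED_FUNCTION")
 *                 language("SQL")
 *                 definitionBody("SELECT 1 + value AS value\n")
 *                 arguments(
 *                     {
 *                         name("value")
 *                         argumentKind("FIXED_TYPE")
 *                         dataType("{\"typeKind\":\"INT64\"}")
 *                     }
 *                 )
 *                 // Same JSON the other examples build with JSON.stringify / json.dumps, inlined as a literal.
 *                 returnTableType("{\"columns\":[{\"name\":\"value\",\"type\":{\"typeKind\":\"INT64\"}}]}")
 *             }
 *         }
 *     }
 * }
 * ```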
 * 
 * ### Bigquery Routine Pyspark
 * 
 * ```typescript
 * import * as pulumi from "@pulumi/pulumi";
 * import * as gcp from "@pulumi/gcp";
 * const test = new gcp.bigquery.Dataset("test", {datasetId: "dataset_id"});
 * const testConnection = new gcp.bigquery.Connection("test", {
 *     connectionId: "connection_id",
 *     location: "US",
 *     spark: {},
 * });
 * const pyspark = new gcp.bigquery.Routine("pyspark", {
 *     datasetId: test.datasetId,
 *     routineId: "routine_id",
 *     routineType: "PROCEDURE",
 *     language: "PYTHON",
 *     definitionBody: `from pyspark.sql import SparkSession
 * spark = SparkSession.builder.appName("spark-bigquery-demo").getOrCreate()
 * # Load data from BigQuery.
 * words = spark.read.format("bigquery") \\
 *   .option("table", "bigquery-public-data:samples.shakespeare") \\
 *   .load()
 * words.createOrReplaceTempView("words")
 * # Perform word count.
 * word_count = words.select('word', 'word_count').groupBy('word').sum('word_count').withColumnRenamed("sum(word_count)", "sum_word_count")
 * word_count.show()
 * word_count.printSchema()
 * # Saving the data to BigQuery
 * word_count.write.format("bigquery") \\
 *   .option("writeMethod", "direct") \\
 *   .save("wordcount_dataset.wordcount_output")
 * `,
 *     sparkOptions: {
 *         connection: testConnection.name,
 *         runtimeVersion: "2.1",
 *     },
 * });
 * ```
 * ```python
 * import pulumi
 * import pulumi_gcp as gcp
 * test = gcp.bigquery.Dataset("test", dataset_id="dataset_id")
 * test_connection = gcp.bigquery.Connection("test",
 *     connection_id="connection_id",
 *     location="US",
 *     spark=gcp.bigquery.ConnectionSparkArgs())
 * pyspark = gcp.bigquery.Routine("pyspark",
 *     dataset_id=test.dataset_id,
 *     routine_id="routine_id",
 *     routine_type="PROCEDURE",
 *     language="PYTHON",
 *     definition_body="""from pyspark.sql import SparkSession
 * spark = SparkSession.builder.appName("spark-bigquery-demo").getOrCreate()
 * # Load data from BigQuery.
 * words = spark.read.format("bigquery") \
 *   .option("table", "bigquery-public-data:samples.shakespeare") \
 *   .load()
 * words.createOrReplaceTempView("words")
 * # Perform word count.
 * word_count = words.select('word', 'word_count').groupBy('word').sum('word_count').withColumnRenamed("sum(word_count)", "sum_word_count")
 * word_count.show()
 * word_count.printSchema()
 * # Saving the data to BigQuery
 * word_count.write.format("bigquery") \
 *   .option("writeMethod", "direct") \
 *   .save("wordcount_dataset.wordcount_output")
 * """,
 *     spark_options=gcp.bigquery.RoutineSparkOptionsArgs(
 *         connection=test_connection.name,
 *         runtime_version="2.1",
 *     ))
 * ```
 * ```csharp
 * using System.Collections.Generic;
 * using System.Linq;
 * using Pulumi;
 * using Gcp = Pulumi.Gcp;
 * return await Deployment.RunAsync(() =>
 * {
 *     var test = new Gcp.BigQuery.Dataset("test", new()
 *     {
 *         DatasetId = "dataset_id",
 *     });
 *     var testConnection = new Gcp.BigQuery.Connection("test", new()
 *     {
 *         ConnectionId = "connection_id",
 *         Location = "US",
 *         Spark = null,
 *     });
 *     var pyspark = new Gcp.BigQuery.Routine("pyspark", new()
 *     {
 *         DatasetId = test.DatasetId,
 *         RoutineId = "routine_id",
 *         RoutineType = "PROCEDURE",
 *         Language = "PYTHON",
 *         DefinitionBody = @"from pyspark.sql import SparkSession
 * spark = SparkSession.builder.appName(""spark-bigquery-demo"").getOrCreate()
 * # Load data from BigQuery.
 * words = spark.read.format(""bigquery"") \
 *   .option(""table"", ""bigquery-public-data:samples.shakespeare"") \
 *   .load()
 * words.createOrReplaceTempView(""words"")
 * # Perform word count.
 * word_count = words.select('word', 'word_count').groupBy('word').sum('word_count').withColumnRenamed(""sum(word_count)"", ""sum_word_count"")
 * word_count.show()
 * word_count.printSchema()
 * # Saving the data to BigQuery
 * word_count.write.format(""bigquery"") \
 *   .option(""writeMethod"", ""direct"") \
 *   .save(""wordcount_dataset.wordcount_output"")
 * ",
 *         SparkOptions = new Gcp.BigQuery.Inputs.RoutineSparkOptionsArgs
 *         {
 *             Connection = testConnection.Name,
 *             RuntimeVersion = "2.1",
 *         },
 *     });
 * });
 * ```
 * ```go
 * package main
 * import (
 * 	"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/bigquery"
 * 	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
 * )
 * func main() {
 * 	pulumi.Run(func(ctx *pulumi.Context) error {
 * 		test, err := bigquery.NewDataset(ctx, "test", &bigquery.DatasetArgs{
 * 			DatasetId: pulumi.String("dataset_id"),
 * 		})
 * 		if err != nil {
 * 			return err
 * 		}
 * 		testConnection, err := bigquery.NewConnection(ctx, "test", &bigquery.ConnectionArgs{
 * 			ConnectionId: pulumi.String("connection_id"),
 * 			Location:     pulumi.String("US"),
 * 			Spark:        nil,
 * 		})
 * 		if err != nil {
 * 			return err
 * 		}
 * 		_, err = bigquery.NewRoutine(ctx, "pyspark", &bigquery.RoutineArgs{
 * 			DatasetId:   test.DatasetId,
 * 			RoutineId:   pulumi.String("routine_id"),
 * 			RoutineType: pulumi.String("PROCEDURE"),
 * 			Language:    pulumi.String("PYTHON"),
 * 			DefinitionBody: pulumi.String(`from pyspark.sql import SparkSession
 * spark = SparkSession.builder.appName("spark-bigquery-demo").getOrCreate()
 * # Load data from BigQuery.
 * words = spark.read.format("bigquery") \
 *   .option("table", "bigquery-public-data:samples.shakespeare") \
 *   .load()
 * words.createOrReplaceTempView("words")
 * # Perform word count.
 * word_count = words.select('word', 'word_count').groupBy('word').sum('word_count').withColumnRenamed("sum(word_count)", "sum_word_count")
 * word_count.show()
 * word_count.printSchema()
 * # Saving the data to BigQuery
 * word_count.write.format("bigquery") \
 *   .option("writeMethod", "direct") \
 *   .save("wordcount_dataset.wordcount_output")
 * `),
 * 			SparkOptions: &bigquery.RoutineSparkOptionsArgs{
 * 				Connection:     testConnection.Name,
 * 				RuntimeVersion: pulumi.String("2.1"),
 * 			},
 * 		})
 * 		if err != nil {
 * 			return err
 * 		}
 * 		return nil
 * 	})
 * }
 * ```
 * ```java
 * package generated_program;
 * import com.pulumi.Context;
 * import com.pulumi.Pulumi;
 * import com.pulumi.core.Output;
 * import com.pulumi.gcp.bigquery.Dataset;
 * import com.pulumi.gcp.bigquery.DatasetArgs;
 * import com.pulumi.gcp.bigquery.Connection;
 * import com.pulumi.gcp.bigquery.ConnectionArgs;
 * import com.pulumi.gcp.bigquery.inputs.ConnectionSparkArgs;
 * import com.pulumi.gcp.bigquery.Routine;
 * import com.pulumi.gcp.bigquery.RoutineArgs;
 * import com.pulumi.gcp.bigquery.inputs.RoutineSparkOptionsArgs;
 * import java.util.List;
 * import java.util.ArrayList;
 * import java.util.Map;
 * import java.io.File;
 * import java.nio.file.Files;
 * import java.nio.file.Paths;
 * public class App {
 *     public static void main(String[] args) {
 *         Pulumi.run(App::stack);
 *     }
 *     public static void stack(Context ctx) {
 *         var test = new Dataset("test", DatasetArgs.builder()
 *             .datasetId("dataset_id")
 *             .build());
 *         var testConnection = new Connection("testConnection", ConnectionArgs.builder()
 *             .connectionId("connection_id")
 *             .location("US")
 *             .spark()
 *             .build());
 *         var pyspark = new Routine("pyspark", RoutineArgs.builder()
 *             .datasetId(test.datasetId())
 *             .routineId("routine_id")
 *             .routineType("PROCEDURE")
 *             .language("PYTHON")
 *             .definitionBody("""
 * from pyspark.sql import SparkSession
 * spark = SparkSession.builder.appName("spark-bigquery-demo").getOrCreate()
 * # Load data from BigQuery.
 * words = spark.read.format("bigquery") \
 *   .option("table", "bigquery-public-data:samples.shakespeare") \
 *   .load()
 * words.createOrReplaceTempView("words")
 * # Perform word count.
 * word_count = words.select('word', 'word_count').groupBy('word').sum('word_count').withColumnRenamed("sum(word_count)", "sum_word_count")
 * word_count.show()
 * word_count.printSchema()
 * # Saving the data to BigQuery
 * word_count.write.format("bigquery") \
 *   .option("writeMethod", "direct") \
 *   .save("wordcount_dataset.wordcount_output")
 *             """)
 *             .sparkOptions(RoutineSparkOptionsArgs.builder()
 *                 .connection(testConnection.name())
 *                 .runtimeVersion("2.1")
 *                 .build())
 *             .build());
 *     }
 * }
 * ```
 * ```yaml
 * resources:
 *   test:
 *     type: gcp:bigquery:Dataset
 *     properties:
 *       datasetId: dataset_id
 *   testConnection:
 *     type: gcp:bigquery:Connection
 *     name: test
 *     properties:
 *       connectionId: connection_id
 *       location: US
 *       spark: {}
 *   pyspark:
 *     type: gcp:bigquery:Routine
 *     properties:
 *       datasetId: ${test.datasetId}
 *       routineId: routine_id
 *       routineType: PROCEDURE
 *       language: PYTHON
 *       definitionBody: "from pyspark.sql import SparkSession\n\nspark = SparkSession.builder.appName(\"spark-bigquery-demo\").getOrCreate()\n    \n# Load data from BigQuery.\nwords = spark.read.format(\"bigquery\") \\\n  .option(\"table\", \"bigquery-public-data:samples.shakespeare\") \\\n  .load()\nwords.createOrReplaceTempView(\"words\")\n    \n# Perform word count.\nword_count = words.select('word', 'word_count').groupBy('word').sum('word_count').withColumnRenamed(\"sum(word_count)\", \"sum_word_count\")\nword_count.show()\nword_count.printSchema()\n    \n# Saving the data to BigQuery\nword_count.write.format(\"bigquery\") \\\n  .option(\"writeMethod\", \"direct\") \\\n  .save(\"wordcount_dataset.wordcount_output\")\n"
 *       sparkOptions:
 *         connection: ${testConnection.name}
 *         runtimeVersion: '2.1'
 * ```
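 *
 * A Kotlin sketch of the PySpark procedure. In addition to the assumptions above, it assumes the generated
 * `connection` DSL function and the nested `spark { }` / `sparkOptions { }` builder blocks; the procedure
 * body is kept in a raw string.
 * ```kotlin
 * import com.pulumi.gcp.bigquery.kotlin.connection
 * import com.pulumi.gcp.bigquery.kotlin.dataset
 * import com.pulumi.gcp.bigquery.kotlin.routine
 * import com.pulumi.kotlin.Pulumi
 *
 * fun main() {
 *     Pulumi.run {
 *         val test = dataset("test") {
 *             args {
 *                 datasetId("dataset_id")
 *             }
 *         }
 *         // Spark connection used by the stored procedure.
 *         val testConnection = connection("test") {
 *             args {
 *                 connectionId("connection_id")
 *                 location("US")
 *                 spark { }
 *             }
 *         }
 *         routine("pyspark") {
 *             args {
 *                 datasetId(test.datasetId)
 *                 routineId("routine_id")
 *                 routineType("PROCEDURE")
 *                 language("PYTHON")
 *                 // Raw string keeps the PySpark source readable; trimIndent() strips the leading margin.
 *                 definitionBody("""
 *                     from pyspark.sql import SparkSession
 *                     spark = SparkSession.builder.appName("spark-bigquery-demo").getOrCreate()
 *                     # Load data from BigQuery.
 *                     words = spark.read.format("bigquery") \
 *                       .option("table", "bigquery-public-data:samples.shakespeare") \
 *                       .load()
 *                     words.createOrReplaceTempView("words")
 *                     # Perform word count.
 *                     word_count = words.select('word', 'word_count').groupBy('word').sum('word_count').withColumnRenamed("sum(word_count)", "sum_word_count")
 *                     word_count.show()
 *                     word_count.printSchema()
 *                     # Saving the data to BigQuery
 *                     word_count.write.format("bigquery") \
 *                       .option("writeMethod", "direct") \
 *                       .save("wordcount_dataset.wordcount_output")
 *                 """.trimIndent())
 *                 sparkOptions {
 *                     connection(testConnection.name)
 *                     runtimeVersion("2.1")
 *                 }
 *             }
 *         }
 *     }
 * }
 * ```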
 * 
 * ### Bigquery Routine Pyspark Mainfile
 * 
 * ```typescript
 * import * as pulumi from "@pulumi/pulumi";
 * import * as gcp from "@pulumi/gcp";
 * const test = new gcp.bigquery.Dataset("test", {datasetId: "dataset_id"});
 * const testConnection = new gcp.bigquery.Connection("test", {
 *     connectionId: "connection_id",
 *     location: "US",
 *     spark: {},
 * });
 * const pysparkMainfile = new gcp.bigquery.Routine("pyspark_mainfile", {
 *     datasetId: test.datasetId,
 *     routineId: "routine_id",
 *     routineType: "PROCEDURE",
 *     language: "PYTHON",
 *     definitionBody: "",
 *     sparkOptions: {
 *         connection: testConnection.name,
 *         runtimeVersion: "2.1",
 *         mainFileUri: "gs://test-bucket/main.py",
 *         pyFileUris: ["gs://test-bucket/lib.py"],
 *         fileUris: ["gs://test-bucket/distribute_in_executor.json"],
 *         archiveUris: ["gs://test-bucket/distribute_in_executor.tar.gz"],
 *     },
 * });
 * ```
 * ```python
 * import pulumi
 * import pulumi_gcp as gcp
 * test = gcp.bigquery.Dataset("test", dataset_id="dataset_id")
 * test_connection = gcp.bigquery.Connection("test",
 *     connection_id="connection_id",
 *     location="US",
 *     spark=gcp.bigquery.ConnectionSparkArgs())
 * pyspark_mainfile = gcp.bigquery.Routine("pyspark_mainfile",
 *     dataset_id=test.dataset_id,
 *     routine_id="routine_id",
 *     routine_type="PROCEDURE",
 *     language="PYTHON",
 *     definition_body="",
 *     spark_options=gcp.bigquery.RoutineSparkOptionsArgs(
 *         connection=test_connection.name,
 *         runtime_version="2.1",
 *         main_file_uri="gs://test-bucket/main.py",
 *         py_file_uris=["gs://test-bucket/lib.py"],
 *         file_uris=["gs://test-bucket/distribute_in_executor.json"],
 *         archive_uris=["gs://test-bucket/distribute_in_executor.tar.gz"],
 *     ))
 * ```
 * ```csharp
 * using System.Collections.Generic;
 * using System.Linq;
 * using Pulumi;
 * using Gcp = Pulumi.Gcp;
 * return await Deployment.RunAsync(() =>
 * {
 *     var test = new Gcp.BigQuery.Dataset("test", new()
 *     {
 *         DatasetId = "dataset_id",
 *     });
 *     var testConnection = new Gcp.BigQuery.Connection("test", new()
 *     {
 *         ConnectionId = "connection_id",
 *         Location = "US",
 *         Spark = null,
 *     });
 *     var pysparkMainfile = new Gcp.BigQuery.Routine("pyspark_mainfile", new()
 *     {
 *         DatasetId = test.DatasetId,
 *         RoutineId = "routine_id",
 *         RoutineType = "PROCEDURE",
 *         Language = "PYTHON",
 *         DefinitionBody = "",
 *         SparkOptions = new Gcp.BigQuery.Inputs.RoutineSparkOptionsArgs
 *         {
 *             Connection = testConnection.Name,
 *             RuntimeVersion = "2.1",
 *             MainFileUri = "gs://test-bucket/main.py",
 *             PyFileUris = new[]
 *             {
 *                 "gs://test-bucket/lib.py",
 *             },
 *             FileUris = new[]
 *             {
 *                 "gs://test-bucket/distribute_in_executor.json",
 *             },
 *             ArchiveUris = new[]
 *             {
 *                 "gs://test-bucket/distribute_in_executor.tar.gz",
 *             },
 *         },
 *     });
 * });
 * ```
 * ```go
 * package main
 * import (
 * 	"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/bigquery"
 * 	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
 * )
 * func main() {
 * 	pulumi.Run(func(ctx *pulumi.Context) error {
 * 		test, err := bigquery.NewDataset(ctx, "test", &bigquery.DatasetArgs{
 * 			DatasetId: pulumi.String("dataset_id"),
 * 		})
 * 		if err != nil {
 * 			return err
 * 		}
 * 		testConnection, err := bigquery.NewConnection(ctx, "test", &bigquery.ConnectionArgs{
 * 			ConnectionId: pulumi.String("connection_id"),
 * 			Location:     pulumi.String("US"),
 * 			Spark:        nil,
 * 		})
 * 		if err != nil {
 * 			return err
 * 		}
 * 		_, err = bigquery.NewRoutine(ctx, "pyspark_mainfile", &bigquery.RoutineArgs{
 * 			DatasetId:      test.DatasetId,
 * 			RoutineId:      pulumi.String("routine_id"),
 * 			RoutineType:    pulumi.String("PROCEDURE"),
 * 			Language:       pulumi.String("PYTHON"),
 * 			DefinitionBody: pulumi.String(""),
 * 			SparkOptions: &bigquery.RoutineSparkOptionsArgs{
 * 				Connection:     testConnection.Name,
 * 				RuntimeVersion: pulumi.String("2.1"),
 * 				MainFileUri:    pulumi.String("gs://test-bucket/main.py"),
 * 				PyFileUris: pulumi.StringArray{
 * 					pulumi.String("gs://test-bucket/lib.py"),
 * 				},
 * 				FileUris: pulumi.StringArray{
 * 					pulumi.String("gs://test-bucket/distribute_in_executor.json"),
 * 				},
 * 				ArchiveUris: pulumi.StringArray{
 * 					pulumi.String("gs://test-bucket/distribute_in_executor.tar.gz"),
 * 				},
 * 			},
 * 		})
 * 		if err != nil {
 * 			return err
 * 		}
 * 		return nil
 * 	})
 * }
 * ```
 * ```java
 * package generated_program;
 * import com.pulumi.Context;
 * import com.pulumi.Pulumi;
 * import com.pulumi.core.Output;
 * import com.pulumi.gcp.bigquery.Dataset;
 * import com.pulumi.gcp.bigquery.DatasetArgs;
 * import com.pulumi.gcp.bigquery.Connection;
 * import com.pulumi.gcp.bigquery.ConnectionArgs;
 * import com.pulumi.gcp.bigquery.inputs.ConnectionSparkArgs;
 * import com.pulumi.gcp.bigquery.Routine;
 * import com.pulumi.gcp.bigquery.RoutineArgs;
 * import com.pulumi.gcp.bigquery.inputs.RoutineSparkOptionsArgs;
 * import java.util.List;
 * import java.util.ArrayList;
 * import java.util.Map;
 * import java.io.File;
 * import java.nio.file.Files;
 * import java.nio.file.Paths;
 * public class App {
 *     public static void main(String[] args) {
 *         Pulumi.run(App::stack);
 *     }
 *     public static void stack(Context ctx) {
 *         var test = new Dataset("test", DatasetArgs.builder()
 *             .datasetId("dataset_id")
 *             .build());
 *         var testConnection = new Connection("testConnection", ConnectionArgs.builder()
 *             .connectionId("connection_id")
 *             .location("US")
 *             .spark()
 *             .build());
 *         var pysparkMainfile = new Routine("pysparkMainfile", RoutineArgs.builder()
 *             .datasetId(test.datasetId())
 *             .routineId("routine_id")
 *             .routineType("PROCEDURE")
 *             .language("PYTHON")
 *             .definitionBody("")
 *             .sparkOptions(RoutineSparkOptionsArgs.builder()
 *                 .connection(testConnection.name())
 *                 .runtimeVersion("2.1")
 *                 .mainFileUri("gs://test-bucket/main.py")
 *                 .pyFileUris("gs://test-bucket/lib.py")
 *                 .fileUris("gs://test-bucket/distribute_in_executor.json")
 *                 .archiveUris("gs://test-bucket/distribute_in_executor.tar.gz")
 *                 .build())
 *             .build());
 *     }
 * }
 * ```
 * ```yaml
 * resources:
 *   test:
 *     type: gcp:bigquery:Dataset
 *     properties:
 *       datasetId: dataset_id
 *   testConnection:
 *     type: gcp:bigquery:Connection
 *     name: test
 *     properties:
 *       connectionId: connection_id
 *       location: US
 *       spark: {}
 *   pysparkMainfile:
 *     type: gcp:bigquery:Routine
 *     name: pyspark_mainfile
 *     properties:
 *       datasetId: ${test.datasetId}
 *       routineId: routine_id
 *       routineType: PROCEDURE
 *       language: PYTHON
 *       definitionBody: ""
 *       sparkOptions:
 *         connection: ${testConnection.name}
 *         runtimeVersion: '2.1'
 *         mainFileUri: gs://test-bucket/main.py
 *         pyFileUris:
 *           - gs://test-bucket/lib.py
 *         fileUris:
 *           - gs://test-bucket/distribute_in_executor.json
 *         archiveUris:
 *           - gs://test-bucket/distribute_in_executor.tar.gz
 * ```
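 *
 * A Kotlin sketch of the main-file variant (same assumptions as the Kotlin examples above; list-valued
 * Spark options are passed as Kotlin lists).
 * ```kotlin
 * import com.pulumi.gcp.bigquery.kotlin.connection
 * import com.pulumi.gcp.bigquery.kotlin.dataset
 * import com.pulumi.gcp.bigquery.kotlin.routine
 * import com.pulumi.kotlin.Pulumi
 *
 * fun main() {
 *     Pulumi.run {
 *         val test = dataset("test") {
 *             args {
 *                 datasetId("dataset_id")
 *             }
 *         }
 *         val testConnection = connection("test") {
 *             args {
 *                 connectionId("connection_id")
 *                 location("US")
 *                 spark { }
 *             }
 *         }
 *         routine("pyspark_mainfile") {
 *             args {
 *                 datasetId(test.datasetId)
 *                 routineId("routine_id")
 *                 routineType("PROCEDURE")
 *                 language("PYTHON")
 *                 // The procedure body lives in main.py, so the inline definition stays empty.
 *                 definitionBody("")
 *                 sparkOptions {
 *                     connection(testConnection.name)
 *                     runtimeVersion("2.1")
 *                     mainFileUri("gs://test-bucket/main.py")
 *                     pyFileUris(listOf("gs://test-bucket/lib.py"))
 *                     fileUris(listOf("gs://test-bucket/distribute_in_executor.json"))
 *                     archiveUris(listOf("gs://test-bucket/distribute_in_executor.tar.gz"))
 *                 }
 *             }
 *         }
 *     }
 * }
 * ```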
 * 
 * ### Bigquery Routine Spark Jar
 * 
 * ```typescript
 * import * as pulumi from "@pulumi/pulumi";
 * import * as gcp from "@pulumi/gcp";
 * const test = new gcp.bigquery.Dataset("test", {datasetId: "dataset_id"});
 * const testConnection = new gcp.bigquery.Connection("test", {
 *     connectionId: "connection_id",
 *     location: "US",
 *     spark: {},
 * });
 * const sparkJar = new gcp.bigquery.Routine("spark_jar", {
 *     datasetId: test.datasetId,
 *     routineId: "routine_id",
 *     routineType: "PROCEDURE",
 *     language: "SCALA",
 *     definitionBody: "",
 *     sparkOptions: {
 *         connection: testConnection.name,
 *         runtimeVersion: "2.1",
 *         containerImage: "gcr.io/my-project-id/my-spark-image:latest",
 *         mainClass: "com.google.test.jar.MainClass",
 *         jarUris: ["gs://test-bucket/uberjar_spark_spark3.jar"],
 *         properties: {
 *             "spark.dataproc.scaling.version": "2",
 *             "spark.reducer.fetchMigratedShuffle.enabled": "true",
 *         },
 *     },
 * });
 * ```
 * ```python
 * import pulumi
 * import pulumi_gcp as gcp
 * test = gcp.bigquery.Dataset("test", dataset_id="dataset_id")
 * test_connection = gcp.bigquery.Connection("test",
 *     connection_id="connection_id",
 *     location="US",
 *     spark=gcp.bigquery.ConnectionSparkArgs())
 * spark_jar = gcp.bigquery.Routine("spark_jar",
 *     dataset_id=test.dataset_id,
 *     routine_id="routine_id",
 *     routine_type="PROCEDURE",
 *     language="SCALA",
 *     definition_body="",
 *     spark_options=gcp.bigquery.RoutineSparkOptionsArgs(
 *         connection=test_connection.name,
 *         runtime_version="2.1",
 *         container_image="gcr.io/my-project-id/my-spark-image:latest",
 *         main_class="com.google.test.jar.MainClass",
 *         jar_uris=["gs://test-bucket/uberjar_spark_spark3.jar"],
 *         properties={
 *             "spark.dataproc.scaling.version": "2",
 *             "spark.reducer.fetchMigratedShuffle.enabled": "true",
 *         },
 *     ))
 * ```
 * ```csharp
 * using System.Collections.Generic;
 * using System.Linq;
 * using Pulumi;
 * using Gcp = Pulumi.Gcp;
 * return await Deployment.RunAsync(() =>
 * {
 *     var test = new Gcp.BigQuery.Dataset("test", new()
 *     {
 *         DatasetId = "dataset_id",
 *     });
 *     var testConnection = new Gcp.BigQuery.Connection("test", new()
 *     {
 *         ConnectionId = "connection_id",
 *         Location = "US",
 *         Spark = null,
 *     });
 *     var sparkJar = new Gcp.BigQuery.Routine("spark_jar", new()
 *     {
 *         DatasetId = test.DatasetId,
 *         RoutineId = "routine_id",
 *         RoutineType = "PROCEDURE",
 *         Language = "SCALA",
 *         DefinitionBody = "",
 *         SparkOptions = new Gcp.BigQuery.Inputs.RoutineSparkOptionsArgs
 *         {
 *             Connection = testConnection.Name,
 *             RuntimeVersion = "2.1",
 *             ContainerImage = "gcr.io/my-project-id/my-spark-image:latest",
 *             MainClass = "com.google.test.jar.MainClass",
 *             JarUris = new[]
 *             {
 *                 "gs://test-bucket/uberjar_spark_spark3.jar",
 *             },
 *             Properties =
 *             {
 *                 { "spark.dataproc.scaling.version", "2" },
 *                 { "spark.reducer.fetchMigratedShuffle.enabled", "true" },
 *             },
 *         },
 *     });
 * });
 * ```
 * ```go
 * package main
 * import (
 * 	"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/bigquery"
 * 	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
 * )
 * func main() {
 * 	pulumi.Run(func(ctx *pulumi.Context) error {
 * 		test, err := bigquery.NewDataset(ctx, "test", &bigquery.DatasetArgs{
 * 			DatasetId: pulumi.String("dataset_id"),
 * 		})
 * 		if err != nil {
 * 			return err
 * 		}
 * 		testConnection, err := bigquery.NewConnection(ctx, "test", &bigquery.ConnectionArgs{
 * 			ConnectionId: pulumi.String("connection_id"),
 * 			Location:     pulumi.String("US"),
 * 			Spark:        nil,
 * 		})
 * 		if err != nil {
 * 			return err
 * 		}
 * 		_, err = bigquery.NewRoutine(ctx, "spark_jar", &bigquery.RoutineArgs{
 * 			DatasetId:      test.DatasetId,
 * 			RoutineId:      pulumi.String("routine_id"),
 * 			RoutineType:    pulumi.String("PROCEDURE"),
 * 			Language:       pulumi.String("SCALA"),
 * 			DefinitionBody: pulumi.String(""),
 * 			SparkOptions: &bigquery.RoutineSparkOptionsArgs{
 * 				Connection:     testConnection.Name,
 * 				RuntimeVersion: pulumi.String("2.1"),
 * 				ContainerImage: pulumi.String("gcr.io/my-project-id/my-spark-image:latest"),
 * 				MainClass:      pulumi.String("com.google.test.jar.MainClass"),
 * 				JarUris: pulumi.StringArray{
 * 					pulumi.String("gs://test-bucket/uberjar_spark_spark3.jar"),
 * 				},
 * 				Properties: pulumi.StringMap{
 * 					"spark.dataproc.scaling.version":             pulumi.String("2"),
 * 					"spark.reducer.fetchMigratedShuffle.enabled": pulumi.String("true"),
 * 				},
 * 			},
 * 		})
 * 		if err != nil {
 * 			return err
 * 		}
 * 		return nil
 * 	})
 * }
 * ```
 * ```java
 * package generated_program;
 * import com.pulumi.Context;
 * import com.pulumi.Pulumi;
 * import com.pulumi.core.Output;
 * import com.pulumi.gcp.bigquery.Dataset;
 * import com.pulumi.gcp.bigquery.DatasetArgs;
 * import com.pulumi.gcp.bigquery.Connection;
 * import com.pulumi.gcp.bigquery.ConnectionArgs;
 * import com.pulumi.gcp.bigquery.inputs.ConnectionSparkArgs;
 * import com.pulumi.gcp.bigquery.Routine;
 * import com.pulumi.gcp.bigquery.RoutineArgs;
 * import com.pulumi.gcp.bigquery.inputs.RoutineSparkOptionsArgs;
 * import java.util.List;
 * import java.util.ArrayList;
 * import java.util.Map;
 * import java.io.File;
 * import java.nio.file.Files;
 * import java.nio.file.Paths;
 * public class App {
 *     public static void main(String[] args) {
 *         Pulumi.run(App::stack);
 *     }
 *     public static void stack(Context ctx) {
 *         var test = new Dataset("test", DatasetArgs.builder()
 *             .datasetId("dataset_id")
 *             .build());
 *         var testConnection = new Connection("testConnection", ConnectionArgs.builder()
 *             .connectionId("connection_id")
 *             .location("US")
 *             .spark()
 *             .build());
 *         var sparkJar = new Routine("sparkJar", RoutineArgs.builder()
 *             .datasetId(test.datasetId())
 *             .routineId("routine_id")
 *             .routineType("PROCEDURE")
 *             .language("SCALA")
 *             .definitionBody("")
 *             .sparkOptions(RoutineSparkOptionsArgs.builder()
 *                 .connection(testConnection.name())
 *                 .runtimeVersion("2.1")
 *                 .containerImage("gcr.io/my-project-id/my-spark-image:latest")
 *                 .mainClass("com.google.test.jar.MainClass")
 *                 .jarUris("gs://test-bucket/uberjar_spark_spark3.jar")
 *                 .properties(Map.ofEntries(
 *                     Map.entry("spark.dataproc.scaling.version", "2"),
 *                     Map.entry("spark.reducer.fetchMigratedShuffle.enabled", "true")
 *                 ))
 *                 .build())
 *             .build());
 *     }
 * }
 * ```
 * ```yaml
 * resources:
 *   test:
 *     type: gcp:bigquery:Dataset
 *     properties:
 *       datasetId: dataset_id
 *   testConnection:
 *     type: gcp:bigquery:Connection
 *     name: test
 *     properties:
 *       connectionId: connection_id
 *       location: US
 *       spark: {}
 *   sparkJar:
 *     type: gcp:bigquery:Routine
 *     name: spark_jar
 *     properties:
 *       datasetId: ${test.datasetId}
 *       routineId: routine_id
 *       routineType: PROCEDURE
 *       language: SCALA
 *       definitionBody: ""
 *       sparkOptions:
 *         connection: ${testConnection.name}
 *         runtimeVersion: '2.1'
 *         containerImage: gcr.io/my-project-id/my-spark-image:latest
 *         mainClass: com.google.test.jar.MainClass
 *         jarUris:
 *           - gs://test-bucket/uberjar_spark_spark3.jar
 *         properties:
 *           spark.dataproc.scaling.version: '2'
 *           spark.reducer.fetchMigratedShuffle.enabled: 'true'
 * ```
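 *
 * A Kotlin sketch of the Spark JAR procedure (same assumptions as the Kotlin examples above; the Spark
 * `properties` are passed as a Kotlin map).
 * ```kotlin
 * import com.pulumi.gcp.bigquery.kotlin.connection
 * import com.pulumi.gcp.bigquery.kotlin.dataset
 * import com.pulumi.gcp.bigquery.kotlin.routine
 * import com.pulumi.kotlin.Pulumi
 *
 * fun main() {
 *     Pulumi.run {
 *         val test = dataset("test") {
 *             args {
 *                 datasetId("dataset_id")
 *             }
 *         }
 *         val testConnection = connection("test") {
 *             args {
 *                 connectionId("connection_id")
 *                 location("US")
 *                 spark { }
 *             }
 *         }
 *         routine("spark_jar") {
 *             args {
 *                 datasetId(test.datasetId)
 *                 routineId("routine_id")
 *                 routineType("PROCEDURE")
 *                 language("SCALA")
 *                 definitionBody("")
 *                 sparkOptions {
 *                     connection(testConnection.name)
 *                     runtimeVersion("2.1")
 *                     containerImage("gcr.io/my-project-id/my-spark-image:latest")
 *                     mainClass("com.google.test.jar.MainClass")
 *                     jarUris(listOf("gs://test-bucket/uberjar_spark_spark3.jar"))
 *                     properties(mapOf(
 *                         "spark.dataproc.scaling.version" to "2",
 *                         "spark.reducer.fetchMigratedShuffle.enabled" to "true"
 *                     ))
 *                 }
 *             }
 *         }
 *     }
 * }
 * ```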
 * 
 * ### Bigquery Routine Data Governance Type
 * 
 * ```typescript
 * import * as pulumi from "@pulumi/pulumi";
 * import * as gcp from "@pulumi/gcp";
 * const test = new gcp.bigquery.Dataset("test", {datasetId: "tf_test_dataset_id_15222"});
 * const customMaskingRoutine = new gcp.bigquery.Routine("custom_masking_routine", {
 *     datasetId: test.datasetId,
 *     routineId: "custom_masking_routine",
 *     routineType: "SCALAR_FUNCTION",
 *     language: "SQL",
 *     dataGovernanceType: "DATA_MASKING",
 *     definitionBody: "SAFE.REGEXP_REPLACE(ssn, '[0-9]', 'X')",
 *     arguments: [{
 *         name: "ssn",
 *         dataType: "{\"typeKind\" :  \"STRING\"}",
 *     }],
 *     returnType: "{\"typeKind\" :  \"STRING\"}",
 * });
 * ```
 * ```python
 * import pulumi
 * import pulumi_gcp as gcp
 * test = gcp.bigquery.Dataset("test", dataset_id="tf_test_dataset_id_15222")
 * custom_masking_routine = gcp.bigquery.Routine("custom_masking_routine",
 *     dataset_id=test.dataset_id,
 *     routine_id="custom_masking_routine",
 *     routine_type="SCALAR_FUNCTION",
 *     language="SQL",
 *     data_governance_type="DATA_MASKING",
 *     definition_body="SAFE.REGEXP_REPLACE(ssn, '[0-9]', 'X')",
 *     arguments=[gcp.bigquery.RoutineArgumentArgs(
 *         name="ssn",
 *         data_type="{\"typeKind\" :  \"STRING\"}",
 *     )],
 *     return_type="{\"typeKind\" :  \"STRING\"}")
 * ```
 * ```csharp
 * using System.Collections.Generic;
 * using System.Linq;
 * using Pulumi;
 * using Gcp = Pulumi.Gcp;
 * return await Deployment.RunAsync(() =>
 * {
 *     var test = new Gcp.BigQuery.Dataset("test", new()
 *     {
 *         DatasetId = "tf_test_dataset_id_15222",
 *     });
 *     var customMaskingRoutine = new Gcp.BigQuery.Routine("custom_masking_routine", new()
 *     {
 *         DatasetId = test.DatasetId,
 *         RoutineId = "custom_masking_routine",
 *         RoutineType = "SCALAR_FUNCTION",
 *         Language = "SQL",
 *         DataGovernanceType = "DATA_MASKING",
 *         DefinitionBody = "SAFE.REGEXP_REPLACE(ssn, '[0-9]', 'X')",
 *         Arguments = new[]
 *         {
 *             new Gcp.BigQuery.Inputs.RoutineArgumentArgs
 *             {
 *                 Name = "ssn",
 *                 DataType = "{\"typeKind\" :  \"STRING\"}",
 *             },
 *         },
 *         ReturnType = "{\"typeKind\" :  \"STRING\"}",
 *     });
 * });
 * ```
 * ```go
 * package main
 * import (
 * 	"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/bigquery"
 * 	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
 * )
 * func main() {
 * 	pulumi.Run(func(ctx *pulumi.Context) error {
 * 		test, err := bigquery.NewDataset(ctx, "test", &bigquery.DatasetArgs{
 * 			DatasetId: pulumi.String("tf_test_dataset_id_15222"),
 * 		})
 * 		if err != nil {
 * 			return err
 * 		}
 * 		_, err = bigquery.NewRoutine(ctx, "custom_masking_routine", &bigquery.RoutineArgs{
 * 			DatasetId:          test.DatasetId,
 * 			RoutineId:          pulumi.String("custom_masking_routine"),
 * 			RoutineType:        pulumi.String("SCALAR_FUNCTION"),
 * 			Language:           pulumi.String("SQL"),
 * 			DataGovernanceType: pulumi.String("DATA_MASKING"),
 * 			DefinitionBody:     pulumi.String("SAFE.REGEXP_REPLACE(ssn, '[0-9]', 'X')"),
 * 			Arguments: bigquery.RoutineArgumentArray{
 * 				&bigquery.RoutineArgumentArgs{
 * 					Name:     pulumi.String("ssn"),
 * 					DataType: pulumi.String("{\"typeKind\" :  \"STRING\"}"),
 * 				},
 * 			},
 * 			ReturnType: pulumi.String("{\"typeKind\" :  \"STRING\"}"),
 * 		})
 * 		if err != nil {
 * 			return err
 * 		}
 * 		return nil
 * 	})
 * }
 * ```
 * ```java
 * package generated_program;
 * import com.pulumi.Context;
 * import com.pulumi.Pulumi;
 * import com.pulumi.core.Output;
 * import com.pulumi.gcp.bigquery.Dataset;
 * import com.pulumi.gcp.bigquery.DatasetArgs;
 * import com.pulumi.gcp.bigquery.Routine;
 * import com.pulumi.gcp.bigquery.RoutineArgs;
 * import com.pulumi.gcp.bigquery.inputs.RoutineArgumentArgs;
 * import java.util.List;
 * import java.util.ArrayList;
 * import java.util.Map;
 * import java.io.File;
 * import java.nio.file.Files;
 * import java.nio.file.Paths;
 * public class App {
 *     public static void main(String[] args) {
 *         Pulumi.run(App::stack);
 *     }
 *     public static void stack(Context ctx) {
 *         var test = new Dataset("test", DatasetArgs.builder()
 *             .datasetId("tf_test_dataset_id_15222")
 *             .build());
 *         var customMaskingRoutine = new Routine("customMaskingRoutine", RoutineArgs.builder()
 *             .datasetId(test.datasetId())
 *             .routineId("custom_masking_routine")
 *             .routineType("SCALAR_FUNCTION")
 *             .language("SQL")
 *             .dataGovernanceType("DATA_MASKING")
 *             .definitionBody("SAFE.REGEXP_REPLACE(ssn, '[0-9]', 'X')")
 *             .arguments(RoutineArgumentArgs.builder()
 *                 .name("ssn")
 *                 .dataType("{\"typeKind\" :  \"STRING\"}")
 *                 .build())
 *             .returnType("{\"typeKind\" :  \"STRING\"}")
 *             .build());
 *     }
 * }
 * ```
 * ```yaml
 * resources:
 *   test:
 *     type: gcp:bigquery:Dataset
 *     properties:
 *       datasetId: tf_test_dataset_id_15222
 *   customMaskingRoutine:
 *     type: gcp:bigquery:Routine
 *     name: custom_masking_routine
 *     properties:
 *       datasetId: ${test.datasetId}
 *       routineId: custom_masking_routine
 *       routineType: SCALAR_FUNCTION
 *       language: SQL
 *       dataGovernanceType: DATA_MASKING
 *       definitionBody: SAFE.REGEXP_REPLACE(ssn, '[0-9]', 'X')
 *       arguments:
 *         - name: ssn
 *           dataType: '{"typeKind" :  "STRING"}'
 *       returnType: '{"typeKind" :  "STRING"}'
 * ```
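 *
 * A Kotlin sketch of the masking routine (same assumptions as the Kotlin examples above).
 * ```kotlin
 * import com.pulumi.gcp.bigquery.kotlin.dataset
 * import com.pulumi.gcp.bigquery.kotlin.routine
 * import com.pulumi.kotlin.Pulumi
 *
 * fun main() {
 *     Pulumi.run {
 *         val test = dataset("test") {
 *             args {
 *                 datasetId("tf_test_dataset_id_15222")
 *             }
 *         }
 *         routine("custom_masking_routine") {
 *             args {
 *                 datasetId(test.datasetId)
 *                 routineId("custom_masking_routine")
 *                 routineType("SCALAR_FUNCTION")
 *                 language("SQL")
 *                 dataGovernanceType("DATA_MASKING")
 *                 definitionBody("SAFE.REGEXP_REPLACE(ssn, '[0-9]', 'X')")
 *                 arguments(
 *                     {
 *                         name("ssn")
 *                         dataType("{\"typeKind\" :  \"STRING\"}")
 *                     }
 *                 )
 *                 returnType("{\"typeKind\" :  \"STRING\"}")
 *             }
 *         }
 *     }
 * }
 * ```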
 * 
 * ### Bigquery Routine Remote Function
 * 
 * ```typescript
 * import * as pulumi from "@pulumi/pulumi";
 * import * as gcp from "@pulumi/gcp";
 * const test = new gcp.bigquery.Dataset("test", {datasetId: "dataset_id"});
 * const testConnection = new gcp.bigquery.Connection("test", {
 *     connectionId: "connection_id",
 *     location: "US",
 *     cloudResource: {},
 * });
 * const remoteFunction = new gcp.bigquery.Routine("remote_function", {
 *     datasetId: test.datasetId,
 *     routineId: "routine_id",
 *     routineType: "SCALAR_FUNCTION",
 *     definitionBody: "",
 *     returnType: "{\"typeKind\" :  \"STRING\"}",
 *     remoteFunctionOptions: {
 *         endpoint: "https://us-east1-my_gcf_project.cloudfunctions.net/remote_add",
 *         connection: testConnection.name,
 *         maxBatchingRows: "10",
 *         userDefinedContext: {
 *             z: "1.5",
 *         },
 *     },
 * });
 * ```
 * ```python
 * import pulumi
 * import pulumi_gcp as gcp
 * test = gcp.bigquery.Dataset("test", dataset_id="dataset_id")
 * test_connection = gcp.bigquery.Connection("test",
 *     connection_id="connection_id",
 *     location="US",
 *     cloud_resource=gcp.bigquery.ConnectionCloudResourceArgs())
 * remote_function = gcp.bigquery.Routine("remote_function",
 *     dataset_id=test.dataset_id,
 *     routine_id="routine_id",
 *     routine_type="SCALAR_FUNCTION",
 *     definition_body="",
 *     return_type="{\"typeKind\" :  \"STRING\"}",
 *     remote_function_options=gcp.bigquery.RoutineRemoteFunctionOptionsArgs(
 *         endpoint="https://us-east1-my_gcf_project.cloudfunctions.net/remote_add",
 *         connection=test_connection.name,
 *         max_batching_rows="10",
 *         user_defined_context={
 *             "z": "1.5",
 *         },
 *     ))
 * ```
 * ```csharp
 * using System.Collections.Generic;
 * using System.Linq;
 * using Pulumi;
 * using Gcp = Pulumi.Gcp;
 * return await Deployment.RunAsync(() =>
 * {
 *     var test = new Gcp.BigQuery.Dataset("test", new()
 *     {
 *         DatasetId = "dataset_id",
 *     });
 *     var testConnection = new Gcp.BigQuery.Connection("test", new()
 *     {
 *         ConnectionId = "connection_id",
 *         Location = "US",
 *         CloudResource = null,
 *     });
 *     var remoteFunction = new Gcp.BigQuery.Routine("remote_function", new()
 *     {
 *         DatasetId = test.DatasetId,
 *         RoutineId = "routine_id",
 *         RoutineType = "SCALAR_FUNCTION",
 *         DefinitionBody = "",
 *         ReturnType = "{\"typeKind\" :  \"STRING\"}",
 *         RemoteFunctionOptions = new Gcp.BigQuery.Inputs.RoutineRemoteFunctionOptionsArgs
 *         {
 *             Endpoint = "https://us-east1-my_gcf_project.cloudfunctions.net/remote_add",
 *             Connection = testConnection.Name,
 *             MaxBatchingRows = "10",
 *             UserDefinedContext =
 *             {
 *                 { "z", "1.5" },
 *             },
 *         },
 *     });
 * });
 * ```
 * ```go
 * package main
 * import (
 * 	"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/bigquery"
 * 	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
 * )
 * func main() {
 * 	pulumi.Run(func(ctx *pulumi.Context) error {
 * 		test, err := bigquery.NewDataset(ctx, "test", &bigquery.DatasetArgs{
 * 			DatasetId: pulumi.String("dataset_id"),
 * 		})
 * 		if err != nil {
 * 			return err
 * 		}
 * 		testConnection, err := bigquery.NewConnection(ctx, "test", &bigquery.ConnectionArgs{
 * 			ConnectionId:  pulumi.String("connection_id"),
 * 			Location:      pulumi.String("US"),
 * 			CloudResource: nil,
 * 		})
 * 		if err != nil {
 * 			return err
 * 		}
 * 		_, err = bigquery.NewRoutine(ctx, "remote_function", &bigquery.RoutineArgs{
 * 			DatasetId:      test.DatasetId,
 * 			RoutineId:      pulumi.String("routine_id"),
 * 			RoutineType:    pulumi.String("SCALAR_FUNCTION"),
 * 			DefinitionBody: pulumi.String(""),
 * 			ReturnType:     pulumi.String("{\"typeKind\" :  \"STRING\"}"),
 * 			RemoteFunctionOptions: &bigquery.RoutineRemoteFunctionOptionsArgs{
 * 				Endpoint:        pulumi.String("https://us-east1-my_gcf_project.cloudfunctions.net/remote_add"),
 * 				Connection:      testConnection.Name,
 * 				MaxBatchingRows: pulumi.String("10"),
 * 				UserDefinedContext: pulumi.StringMap{
 * 					"z": pulumi.String("1.5"),
 * 				},
 * 			},
 * 		})
 * 		if err != nil {
 * 			return err
 * 		}
 * 		return nil
 * 	})
 * }
 * ```
 * ```java
 * package generated_program;
 * import com.pulumi.Context;
 * import com.pulumi.Pulumi;
 * import com.pulumi.core.Output;
 * import com.pulumi.gcp.bigquery.Dataset;
 * import com.pulumi.gcp.bigquery.DatasetArgs;
 * import com.pulumi.gcp.bigquery.Connection;
 * import com.pulumi.gcp.bigquery.ConnectionArgs;
 * import com.pulumi.gcp.bigquery.inputs.ConnectionCloudResourceArgs;
 * import com.pulumi.gcp.bigquery.Routine;
 * import com.pulumi.gcp.bigquery.RoutineArgs;
 * import com.pulumi.gcp.bigquery.inputs.RoutineRemoteFunctionOptionsArgs;
 * import java.util.List;
 * import java.util.ArrayList;
 * import java.util.Map;
 * import java.io.File;
 * import java.nio.file.Files;
 * import java.nio.file.Paths;
 * public class App {
 *     public static void main(String[] args) {
 *         Pulumi.run(App::stack);
 *     }
 *     public static void stack(Context ctx) {
 *         var test = new Dataset("test", DatasetArgs.builder()
 *             .datasetId("dataset_id")
 *             .build());
 *         var testConnection = new Connection("testConnection", ConnectionArgs.builder()
 *             .connectionId("connection_id")
 *             .location("US")
 *             .cloudResource()
 *             .build());
 *         var remoteFunction = new Routine("remoteFunction", RoutineArgs.builder()
 *             .datasetId(test.datasetId())
 *             .routineId("routine_id")
 *             .routineType("SCALAR_FUNCTION")
 *             .definitionBody("")
 *             .returnType("{\"typeKind\" :  \"STRING\"}")
 *             .remoteFunctionOptions(RoutineRemoteFunctionOptionsArgs.builder()
 *                 .endpoint("https://us-east1-my_gcf_project.cloudfunctions.net/remote_add")
 *                 .connection(testConnection.name())
 *                 .maxBatchingRows("10")
 *                 .userDefinedContext(Map.of("z", "1.5"))
 *                 .build())
 *             .build());
 *     }
 * }
 * ```
 * ```yaml
 * resources:
 *   test:
 *     type: gcp:bigquery:Dataset
 *     properties:
 *       datasetId: dataset_id
 *   testConnection:
 *     type: gcp:bigquery:Connection
 *     name: test
 *     properties:
 *       connectionId: connection_id
 *       location: US
 *       cloudResource: {}
 *   remoteFunction:
 *     type: gcp:bigquery:Routine
 *     name: remote_function
 *     properties:
 *       datasetId: ${test.datasetId}
 *       routineId: routine_id
 *       routineType: SCALAR_FUNCTION
 *       definitionBody: ""
 *       returnType: '{"typeKind" :  "STRING"}'
 *       remoteFunctionOptions:
 *         endpoint: https://us-east1-my_gcf_project.cloudfunctions.net/remote_add
 *         connection: ${testConnection.name}
 *         maxBatchingRows: '10'
 *         userDefinedContext:
 *           z: '1.5'
 * ```
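 * The same remote-function example, sketched with this provider's Kotlin builders. This is a
 * minimal, illustrative sketch: it assumes the analogous generated `dataset` and `connection`
 * builder functions, and it must run from a suspend context inside a Pulumi Kotlin program;
 * the endpoint and identifiers mirror the examples above.
 * ```kotlin
 * import com.pulumi.gcp.bigquery.kotlin.connection
 * import com.pulumi.gcp.bigquery.kotlin.dataset
 * import com.pulumi.gcp.bigquery.kotlin.routine
 * suspend fun remoteFunctionExample() {
 *     // Dataset that will contain the routine.
 *     val test = dataset("test") {
 *         args { datasetId("dataset_id") }
 *     }
 *     // Cloud-resource connection used to reach the remote Cloud Function.
 *     val testConnection = connection("test") {
 *         args {
 *             connectionId("connection_id")
 *             location("US")
 *             cloudResource { }
 *         }
 *     }
 *     // Remote function routine that delegates evaluation to the endpoint above.
 *     val remoteFunction = routine("remote_function") {
 *         args {
 *             datasetId(test.datasetId)
 *             routineId("routine_id")
 *             routineType("SCALAR_FUNCTION")
 *             definitionBody("")
 *             returnType("{\"typeKind\" :  \"STRING\"}")
 *             remoteFunctionOptions {
 *                 endpoint("https://us-east1-my_gcf_project.cloudfunctions.net/remote_add")
 *                 connection(testConnection.name)
 *                 maxBatchingRows("10")
 *                 userDefinedContext(mapOf("z" to "1.5"))
 *             }
 *         }
 *     }
 * }
 * ```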
 * 
 * ## Import
 * Routine can be imported using any of these accepted formats:
 * * `projects/{{project}}/datasets/{{dataset_id}}/routines/{{routine_id}}`
 * * `{{project}}/{{dataset_id}}/{{routine_id}}`
 * * `{{dataset_id}}/{{routine_id}}`
 * When using the `pulumi import` command, Routine can be imported using one of the formats above. For example:
 * ```sh
 * $ pulumi import gcp:bigquery/routine:Routine default projects/{{project}}/datasets/{{dataset_id}}/routines/{{routine_id}}
 * ```
 * ```sh
 * $ pulumi import gcp:bigquery/routine:Routine default {{project}}/{{dataset_id}}/{{routine_id}}
 * ```
 * ```sh
 * $ pulumi import gcp:bigquery/routine:Routine default {{dataset_id}}/{{routine_id}}
 * ```
 */
public class Routine internal constructor(
    override val javaResource: com.pulumi.gcp.bigquery.Routine,
) : KotlinCustomResource(javaResource, RoutineMapper) {
    /**
     * Input/output argument of a function or a stored procedure.
     * Structure is documented below.
     */
    public val arguments: Output<List<RoutineArgument>>?
        get() = javaResource.arguments().applyValue({ args0 ->
            args0.map({ args0 ->
                args0.map({ args0 ->
                    args0.let({ args0 -> routineArgumentToKotlin(args0) })
                })
            }).orElse(null)
        })

    /**
     * The time when this routine was created, in milliseconds since the
     * epoch.
     */
    public val creationTime: Output<Int>
        get() = javaResource.creationTime().applyValue({ args0 -> args0 })

    /**
     * If set to DATA_MASKING, the function is validated and made available as a masking function. For more information, see https://cloud.google.com/bigquery/docs/user-defined-functions#custom-mask
     * Possible values are: `DATA_MASKING`.
     */
    public val dataGovernanceType: Output<String>?
        get() = javaResource.dataGovernanceType().applyValue({ args0 ->
            args0.map({ args0 ->
                args0
            }).orElse(null)
        })

    /**
     * The ID of the dataset containing this routine
     */
    public val datasetId: Output<String>
        get() = javaResource.datasetId().applyValue({ args0 -> args0 })

    /**
     * The body of the routine. For functions, this is the expression in the AS clause.
     * If language=SQL, it is the substring inside (but excluding) the parentheses.
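     * For example, for a SQL function declared `AS (x + y)`, the definitionBody is `x + y`.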
     * - - -
     */
    public val definitionBody: Output<String>
        get() = javaResource.definitionBody().applyValue({ args0 -> args0 })

    /**
     * The description of the routine if defined.
     */
    public val description: Output<String>?
        get() = javaResource.description().applyValue({ args0 ->
            args0.map({ args0 ->
                args0
            }).orElse(null)
        })

    /**
     * The determinism level of the JavaScript UDF if defined.
     * Possible values are: `DETERMINISM_LEVEL_UNSPECIFIED`, `DETERMINISTIC`, `NOT_DETERMINISTIC`.
     */
    public val determinismLevel: Output<String>?
        get() = javaResource.determinismLevel().applyValue({ args0 ->
            args0.map({ args0 ->
                args0
            }).orElse(null)
        })

    /**
     * Optional. If language = "JAVASCRIPT", this field stores the paths of the
     * imported JAVASCRIPT libraries.
     */
    public val importedLibraries: Output<List<String>>?
        get() = javaResource.importedLibraries().applyValue({ args0 ->
            args0.map({ args0 ->
                args0.map({ args0 -> args0 })
            }).orElse(null)
        })

    /**
     * The language of the routine.
     * Possible values are: `SQL`, `JAVASCRIPT`, `PYTHON`, `JAVA`, `SCALA`.
     */
    public val language: Output<String>?
        get() = javaResource.language().applyValue({ args0 -> args0.map({ args0 -> args0 }).orElse(null) })

    /**
     * The time when this routine was modified, in milliseconds since the
     * epoch.
     */
    public val lastModifiedTime: Output<Int>
        get() = javaResource.lastModifiedTime().applyValue({ args0 -> args0 })

    /**
     * The ID of the project in which the resource belongs.
     * If it is not provided, the provider project is used.
     */
    public val project: Output<String>
        get() = javaResource.project().applyValue({ args0 -> args0 })

    /**
     * Remote function specific options.
     * Structure is documented below.
     */
    public val remoteFunctionOptions: Output<RoutineRemoteFunctionOptions>?
        get() = javaResource.remoteFunctionOptions().applyValue({ args0 ->
            args0.map({ args0 ->
                args0.let({ args0 -> routineRemoteFunctionOptionsToKotlin(args0) })
            }).orElse(null)
        })

    /**
     * Optional. Can be set only if routineType = "TABLE_VALUED_FUNCTION".
     * If absent, the return table type is inferred from definitionBody at query time in each query
     * that references this routine. If present, then the columns in the evaluated table result will
     * be cast to match the column types specified in the return table type, at query time.
     */
    public val returnTableType: Output<String>?
        get() = javaResource.returnTableType().applyValue({ args0 ->
            args0.map({ args0 ->
                args0
            }).orElse(null)
        })

    /**
     * A JSON schema for the return type. Optional if language = "SQL"; required otherwise.
     * If absent, the return type is inferred from definitionBody at query time in each query
     * that references this routine. If present, then the evaluated result will be cast to
     * the specified returned type at query time. ~>**NOTE**: Because this field expects a JSON
     * string, any changes to the string will create a diff, even if the JSON itself hasn't
     * changed. If the API returns a different value for the same schema, e.g. it switched
     * the order of values or replaced the STRUCT field type with the RECORD field type, we currently
     * cannot suppress the recurring diff this causes. As a workaround, we recommend using
     * the schema as returned by the API.
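     * For example, a routine returning a scalar string would use `{"typeKind": "STRING"}` as the
     * returnType, matching the form the API returns.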
     */
    public val returnType: Output<String>?
        get() = javaResource.returnType().applyValue({ args0 ->
            args0.map({ args0 ->
                args0
            }).orElse(null)
        })

    /**
     * The ID of the routine. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters.
     */
    public val routineId: Output<String>
        get() = javaResource.routineId().applyValue({ args0 -> args0 })

    /**
     * The type of routine.
     * Possible values are: `SCALAR_FUNCTION`, `PROCEDURE`, `TABLE_VALUED_FUNCTION`.
     */
    public val routineType: Output<String>
        get() = javaResource.routineType().applyValue({ args0 -> args0 })

    /**
     * Optional. If language is one of "PYTHON", "JAVA", "SCALA", this field stores the options for the Spark stored procedure.
     * Structure is documented below.
     */
    public val sparkOptions: Output<RoutineSparkOptions>?
        get() = javaResource.sparkOptions().applyValue({ args0 ->
            args0.map({ args0 ->
                args0.let({ args0 ->
                    routineSparkOptionsToKotlin(args0)
                })
            }).orElse(null)
        })
}

public object RoutineMapper : ResourceMapper<Routine> {
    override fun supportsMappingOfType(javaResource: Resource): Boolean =
        com.pulumi.gcp.bigquery.Routine::class == javaResource::class

    override fun map(javaResource: Resource): Routine = Routine(
        javaResource as
            com.pulumi.gcp.bigquery.Routine,
    )
}

/**
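 * A minimal usage sketch, assuming it is called from a suspend context inside a Pulumi Kotlin
 * program; the argument values are illustrative:
 * ```kotlin
 * val myRoutine = routine("my-routine") {
 *     args {
 *         datasetId("dataset_id")
 *         routineId("routine_id")
 *         routineType("SCALAR_FUNCTION")
 *         language("SQL")
 *         definitionBody("1")
 *     }
 *     // An opts { ... } block can also be supplied to customize resource options.
 * }
 * ```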
 * @see [Routine].
 * @param name The _unique_ name of the resulting resource.
 * @param block Builder for [Routine].
 */
public suspend fun routine(name: String, block: suspend RoutineResourceBuilder.() -> Unit): Routine {
    val builder = RoutineResourceBuilder()
    builder.name(name)
    block(builder)
    return builder.build()
}

/**
 * @see [Routine].
 * @param name The _unique_ name of the resulting resource.
 */
public fun routine(name: String): Routine {
    val builder = RoutineResourceBuilder()
    builder.name(name)
    return builder.build()
}



