@file:Suppress("NAME_SHADOWING", "DEPRECATION")
package com.pulumi.gcp.dataproc.kotlin
import com.pulumi.core.Output
import com.pulumi.core.Output.of
import com.pulumi.gcp.dataproc.JobArgs.builder
import com.pulumi.gcp.dataproc.kotlin.inputs.JobHadoopConfigArgs
import com.pulumi.gcp.dataproc.kotlin.inputs.JobHadoopConfigArgsBuilder
import com.pulumi.gcp.dataproc.kotlin.inputs.JobHiveConfigArgs
import com.pulumi.gcp.dataproc.kotlin.inputs.JobHiveConfigArgsBuilder
import com.pulumi.gcp.dataproc.kotlin.inputs.JobPigConfigArgs
import com.pulumi.gcp.dataproc.kotlin.inputs.JobPigConfigArgsBuilder
import com.pulumi.gcp.dataproc.kotlin.inputs.JobPlacementArgs
import com.pulumi.gcp.dataproc.kotlin.inputs.JobPlacementArgsBuilder
import com.pulumi.gcp.dataproc.kotlin.inputs.JobPrestoConfigArgs
import com.pulumi.gcp.dataproc.kotlin.inputs.JobPrestoConfigArgsBuilder
import com.pulumi.gcp.dataproc.kotlin.inputs.JobPysparkConfigArgs
import com.pulumi.gcp.dataproc.kotlin.inputs.JobPysparkConfigArgsBuilder
import com.pulumi.gcp.dataproc.kotlin.inputs.JobReferenceArgs
import com.pulumi.gcp.dataproc.kotlin.inputs.JobReferenceArgsBuilder
import com.pulumi.gcp.dataproc.kotlin.inputs.JobSchedulingArgs
import com.pulumi.gcp.dataproc.kotlin.inputs.JobSchedulingArgsBuilder
import com.pulumi.gcp.dataproc.kotlin.inputs.JobSparkConfigArgs
import com.pulumi.gcp.dataproc.kotlin.inputs.JobSparkConfigArgsBuilder
import com.pulumi.gcp.dataproc.kotlin.inputs.JobSparksqlConfigArgs
import com.pulumi.gcp.dataproc.kotlin.inputs.JobSparksqlConfigArgsBuilder
import com.pulumi.kotlin.ConvertibleToJava
import com.pulumi.kotlin.PulumiTagMarker
import com.pulumi.kotlin.applySuspend
import kotlin.Boolean
import kotlin.Pair
import kotlin.String
import kotlin.Suppress
import kotlin.Unit
import kotlin.collections.Map
import kotlin.jvm.JvmName
/**
* Manages a job resource within a Dataproc cluster running on Google Compute Engine (GCE). For more information see
* [the official dataproc documentation](https://cloud.google.com/dataproc/).
* !> **Note:** This resource does not support 'update', and changing any attributes will cause the resource to be recreated.
* ## Example Usage
*
* ```typescript
* import * as pulumi from "@pulumi/pulumi";
* import * as gcp from "@pulumi/gcp";
* const mycluster = new gcp.dataproc.Cluster("mycluster", {
*     name: "dproc-cluster-unique-name",
*     region: "us-central1",
* });
* // Submit an example spark job to a dataproc cluster
* const spark = new gcp.dataproc.Job("spark", {
*     region: mycluster.region,
*     forceDelete: true,
*     placement: {
*         clusterName: mycluster.name,
*     },
*     sparkConfig: {
*         mainClass: "org.apache.spark.examples.SparkPi",
*         jarFileUris: ["file:///usr/lib/spark/examples/jars/spark-examples.jar"],
*         args: ["1000"],
*         properties: {
*             "spark.logConf": "true",
*         },
*         loggingConfig: {
*             driverLogLevels: {
*                 root: "INFO",
*             },
*         },
*     },
* });
* // Submit an example pyspark job to a dataproc cluster
* const pyspark = new gcp.dataproc.Job("pyspark", {
*     region: mycluster.region,
*     forceDelete: true,
*     placement: {
*         clusterName: mycluster.name,
*     },
*     pysparkConfig: {
*         mainPythonFileUri: "gs://dataproc-examples-2f10d78d114f6aaec76462e3c310f31f/src/pyspark/hello-world/hello-world.py",
*         properties: {
*             "spark.logConf": "true",
*         },
*     },
* });
* export const sparkStatus = spark.statuses.apply(statuses => statuses[0].state);
* export const pysparkStatus = pyspark.statuses.apply(statuses => statuses[0].state);
* ```
* ```python
* import pulumi
* import pulumi_gcp as gcp
* mycluster = gcp.dataproc.Cluster("mycluster",
*     name="dproc-cluster-unique-name",
*     region="us-central1")
* # Submit an example spark job to a dataproc cluster
* spark = gcp.dataproc.Job("spark",
*     region=mycluster.region,
*     force_delete=True,
*     placement={
*         "cluster_name": mycluster.name,
*     },
*     spark_config={
*         "main_class": "org.apache.spark.examples.SparkPi",
*         "jar_file_uris": ["file:///usr/lib/spark/examples/jars/spark-examples.jar"],
*         "args": ["1000"],
*         "properties": {
*             "spark.logConf": "true",
*         },
*         "logging_config": {
*             "driver_log_levels": {
*                 "root": "INFO",
*             },
*         },
*     })
* # Submit an example pyspark job to a dataproc cluster
* pyspark = gcp.dataproc.Job("pyspark",
*     region=mycluster.region,
*     force_delete=True,
*     placement={
*         "cluster_name": mycluster.name,
*     },
*     pyspark_config={
*         "main_python_file_uri": "gs://dataproc-examples-2f10d78d114f6aaec76462e3c310f31f/src/pyspark/hello-world/hello-world.py",
*         "properties": {
*             "spark.logConf": "true",
*         },
*     })
* pulumi.export("sparkStatus", spark.statuses[0].state)
* pulumi.export("pysparkStatus", pyspark.statuses[0].state)
* ```
* ```csharp
* using System.Collections.Generic;
* using System.Linq;
* using Pulumi;
* using Gcp = Pulumi.Gcp;
* return await Deployment.RunAsync(() =>
* {
*     var mycluster = new Gcp.Dataproc.Cluster("mycluster", new()
*     {
*         Name = "dproc-cluster-unique-name",
*         Region = "us-central1",
*     });
*     // Submit an example spark job to a dataproc cluster
*     var spark = new Gcp.Dataproc.Job("spark", new()
*     {
*         Region = mycluster.Region,
*         ForceDelete = true,
*         Placement = new Gcp.Dataproc.Inputs.JobPlacementArgs
*         {
*             ClusterName = mycluster.Name,
*         },
*         SparkConfig = new Gcp.Dataproc.Inputs.JobSparkConfigArgs
*         {
*             MainClass = "org.apache.spark.examples.SparkPi",
*             JarFileUris = new[]
*             {
*                 "file:///usr/lib/spark/examples/jars/spark-examples.jar",
*             },
*             Args = new[]
*             {
*                 "1000",
*             },
*             Properties =
*             {
*                 { "spark.logConf", "true" },
*             },
*             LoggingConfig = new Gcp.Dataproc.Inputs.JobSparkConfigLoggingConfigArgs
*             {
*                 DriverLogLevels =
*                 {
*                     { "root", "INFO" },
*                 },
*             },
*         },
*     });
*     // Submit an example pyspark job to a dataproc cluster
*     var pyspark = new Gcp.Dataproc.Job("pyspark", new()
*     {
*         Region = mycluster.Region,
*         ForceDelete = true,
*         Placement = new Gcp.Dataproc.Inputs.JobPlacementArgs
*         {
*             ClusterName = mycluster.Name,
*         },
*         PysparkConfig = new Gcp.Dataproc.Inputs.JobPysparkConfigArgs
*         {
*             MainPythonFileUri = "gs://dataproc-examples-2f10d78d114f6aaec76462e3c310f31f/src/pyspark/hello-world/hello-world.py",
*             Properties =
*             {
*                 { "spark.logConf", "true" },
*             },
*         },
*     });
*     return new Dictionary<string, object?>
*     {
*         ["sparkStatus"] = spark.Statuses.Apply(statuses => statuses[0].State),
*         ["pysparkStatus"] = pyspark.Statuses.Apply(statuses => statuses[0].State),
*     };
* });
* ```
* ```go
* package main
* import (
*     "github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/dataproc"
*     "github.com/pulumi/pulumi/sdk/v3/go/pulumi"
* )
* func main() {
*     pulumi.Run(func(ctx *pulumi.Context) error {
*         mycluster, err := dataproc.NewCluster(ctx, "mycluster", &dataproc.ClusterArgs{
*             Name:   pulumi.String("dproc-cluster-unique-name"),
*             Region: pulumi.String("us-central1"),
*         })
*         if err != nil {
*             return err
*         }
*         // Submit an example spark job to a dataproc cluster
*         spark, err := dataproc.NewJob(ctx, "spark", &dataproc.JobArgs{
*             Region:      mycluster.Region,
*             ForceDelete: pulumi.Bool(true),
*             Placement: &dataproc.JobPlacementArgs{
*                 ClusterName: mycluster.Name,
*             },
*             SparkConfig: &dataproc.JobSparkConfigArgs{
*                 MainClass: pulumi.String("org.apache.spark.examples.SparkPi"),
*                 JarFileUris: pulumi.StringArray{
*                     pulumi.String("file:///usr/lib/spark/examples/jars/spark-examples.jar"),
*                 },
*                 Args: pulumi.StringArray{
*                     pulumi.String("1000"),
*                 },
*                 Properties: pulumi.StringMap{
*                     "spark.logConf": pulumi.String("true"),
*                 },
*                 LoggingConfig: &dataproc.JobSparkConfigLoggingConfigArgs{
*                     DriverLogLevels: pulumi.StringMap{
*                         "root": pulumi.String("INFO"),
*                     },
*                 },
*             },
*         })
*         if err != nil {
*             return err
*         }
*         // Submit an example pyspark job to a dataproc cluster
*         pyspark, err := dataproc.NewJob(ctx, "pyspark", &dataproc.JobArgs{
*             Region:      mycluster.Region,
*             ForceDelete: pulumi.Bool(true),
*             Placement: &dataproc.JobPlacementArgs{
*                 ClusterName: mycluster.Name,
*             },
*             PysparkConfig: &dataproc.JobPysparkConfigArgs{
*                 MainPythonFileUri: pulumi.String("gs://dataproc-examples-2f10d78d114f6aaec76462e3c310f31f/src/pyspark/hello-world/hello-world.py"),
*                 Properties: pulumi.StringMap{
*                     "spark.logConf": pulumi.String("true"),
*                 },
*             },
*         })
*         if err != nil {
*             return err
*         }
*         ctx.Export("sparkStatus", spark.Statuses.ApplyT(func(statuses []dataproc.JobStatus) (*string, error) {
*             return &statuses[0].State, nil
*         }).(pulumi.StringPtrOutput))
*         ctx.Export("pysparkStatus", pyspark.Statuses.ApplyT(func(statuses []dataproc.JobStatus) (*string, error) {
*             return &statuses[0].State, nil
*         }).(pulumi.StringPtrOutput))
*         return nil
*     })
* }
* ```
* ```java
* package generated_program;
* import com.pulumi.Context;
* import com.pulumi.Pulumi;
* import com.pulumi.core.Output;
* import com.pulumi.gcp.dataproc.Cluster;
* import com.pulumi.gcp.dataproc.ClusterArgs;
* import com.pulumi.gcp.dataproc.Job;
* import com.pulumi.gcp.dataproc.JobArgs;
* import com.pulumi.gcp.dataproc.inputs.JobPlacementArgs;
* import com.pulumi.gcp.dataproc.inputs.JobSparkConfigArgs;
* import com.pulumi.gcp.dataproc.inputs.JobSparkConfigLoggingConfigArgs;
* import com.pulumi.gcp.dataproc.inputs.JobPysparkConfigArgs;
* import java.util.List;
* import java.util.ArrayList;
* import java.util.Map;
* import java.io.File;
* import java.nio.file.Files;
* import java.nio.file.Paths;
* public class App {
*     public static void main(String[] args) {
*         Pulumi.run(App::stack);
*     }
*     public static void stack(Context ctx) {
*         var mycluster = new Cluster("mycluster", ClusterArgs.builder()
*             .name("dproc-cluster-unique-name")
*             .region("us-central1")
*             .build());
*         // Submit an example spark job to a dataproc cluster
*         var spark = new Job("spark", JobArgs.builder()
*             .region(mycluster.region())
*             .forceDelete(true)
*             .placement(JobPlacementArgs.builder()
*                 .clusterName(mycluster.name())
*                 .build())
*             .sparkConfig(JobSparkConfigArgs.builder()
*                 .mainClass("org.apache.spark.examples.SparkPi")
*                 .jarFileUris("file:///usr/lib/spark/examples/jars/spark-examples.jar")
*                 .args("1000")
*                 .properties(Map.of("spark.logConf", "true"))
*                 .loggingConfig(JobSparkConfigLoggingConfigArgs.builder()
*                     .driverLogLevels(Map.of("root", "INFO"))
*                     .build())
*                 .build())
*             .build());
*         // Submit an example pyspark job to a dataproc cluster
*         var pyspark = new Job("pyspark", JobArgs.builder()
*             .region(mycluster.region())
*             .forceDelete(true)
*             .placement(JobPlacementArgs.builder()
*                 .clusterName(mycluster.name())
*                 .build())
*             .pysparkConfig(JobPysparkConfigArgs.builder()
*                 .mainPythonFileUri("gs://dataproc-examples-2f10d78d114f6aaec76462e3c310f31f/src/pyspark/hello-world/hello-world.py")
*                 .properties(Map.of("spark.logConf", "true"))
*                 .build())
*             .build());
*         ctx.export("sparkStatus", spark.statuses().applyValue(statuses -> statuses.get(0).state()));
*         ctx.export("pysparkStatus", pyspark.statuses().applyValue(statuses -> statuses.get(0).state()));
*     }
* }
* ```
* ```yaml
* resources:
*   mycluster:
*     type: gcp:dataproc:Cluster
*     properties:
*       name: dproc-cluster-unique-name
*       region: us-central1
*   # Submit an example spark job to a dataproc cluster
*   spark:
*     type: gcp:dataproc:Job
*     properties:
*       region: ${mycluster.region}
*       forceDelete: true
*       placement:
*         clusterName: ${mycluster.name}
*       sparkConfig:
*         mainClass: org.apache.spark.examples.SparkPi
*         jarFileUris:
*           - file:///usr/lib/spark/examples/jars/spark-examples.jar
*         args:
*           - '1000'
*         properties:
*           spark.logConf: 'true'
*         loggingConfig:
*           driverLogLevels:
*             root: INFO
*   # Submit an example pyspark job to a dataproc cluster
*   pyspark:
*     type: gcp:dataproc:Job
*     properties:
*       region: ${mycluster.region}
*       forceDelete: true
*       placement:
*         clusterName: ${mycluster.name}
*       pysparkConfig:
*         mainPythonFileUri: gs://dataproc-examples-2f10d78d114f6aaec76462e3c310f31f/src/pyspark/hello-world/hello-world.py
*         properties:
*           spark.logConf: 'true'
* outputs:
*   # Check out current state of the jobs
*   sparkStatus: ${spark.statuses[0].state}
*   pysparkStatus: ${pyspark.statuses[0].state}
* ```
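*
* A minimal Kotlin sketch of the same Spark job, expressed against the [JobArgsBuilder] defined in this
* file. It assumes the usual pulumi-kotlin pattern of supplying the builder lambda through the provider's
* Kotlin DSL, and that the nested builders expose setters (`clusterName`, `mainClass`, ...) mirroring the
* fields in the examples above; names not declared in this file are illustrative only.
* ```kotlin
* // Hypothetical wiring: a configuration lambda targeting JobArgsBuilder. In a real program this
* // lambda would be handed to the gcp.dataproc Kotlin resource DSL rather than invoked directly.
* val sparkJobArgs: suspend JobArgsBuilder.() -> Unit = {
*     region("us-central1")
*     forceDelete(true)
*     placement {
*         // Assumed setter on JobPlacementArgsBuilder, mirroring `clusterName` above.
*         clusterName("dproc-cluster-unique-name")
*     }
*     sparkConfig {
*         // Assumed setters on JobSparkConfigArgsBuilder, mirroring the Spark config fields above.
*         mainClass("org.apache.spark.examples.SparkPi")
*         jarFileUris("file:///usr/lib/spark/examples/jars/spark-examples.jar")
*         args("1000")
*         properties("spark.logConf" to "true")
*     }
*     labels("env" to "example")
* }
* ```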
*
* ## Import
* This resource does not support import.
* @property forceDelete By default, you can only delete inactive jobs within
* Dataproc. Setting this to true, and calling destroy, will ensure that the
* job is first cancelled before issuing the delete.
* @property hadoopConfig The config of Hadoop job
* @property hiveConfig The config of hive job
* @property labels The list of labels (key/value pairs) to add to the job.
* **Note**: This field is non-authoritative, and will only manage the labels present in your configuration.
* Please refer to the field 'effective_labels' for all of the labels present on the resource.
* @property pigConfig The config of pig job.
* @property placement The config of job placement.
* @property prestoConfig The config of presto job
* @property project The project in which the `cluster` can be found and jobs
* subsequently run against. If it is not provided, the provider project is used.
* @property pysparkConfig The config of pySpark job.
* @property reference The reference of the job
* @property region The Cloud Dataproc region. This essentially determines which clusters are available
* for this job to be submitted to. If not specified, defaults to `global`.
* @property scheduling Optional. Job scheduling configuration.
* @property sparkConfig The config of the Spark job.
* @property sparksqlConfig The config of SparkSql job
*/
public data class JobArgs(
public val forceDelete: Output<Boolean>? = null,
public val hadoopConfig: Output<JobHadoopConfigArgs>? = null,
public val hiveConfig: Output<JobHiveConfigArgs>? = null,
public val labels: Output<Map<String, String>>? = null,
public val pigConfig: Output<JobPigConfigArgs>? = null,
public val placement: Output<JobPlacementArgs>? = null,
public val prestoConfig: Output<JobPrestoConfigArgs>? = null,
public val project: Output<String>? = null,
public val pysparkConfig: Output<JobPysparkConfigArgs>? = null,
public val reference: Output<JobReferenceArgs>? = null,
public val region: Output<String>? = null,
public val scheduling: Output<JobSchedulingArgs>? = null,
public val sparkConfig: Output<JobSparkConfigArgs>? = null,
public val sparksqlConfig: Output<JobSparksqlConfigArgs>? = null,
) : ConvertibleToJava<com.pulumi.gcp.dataproc.JobArgs> {
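/**
* Converts this Kotlin [JobArgs] into the underlying Java `com.pulumi.gcp.dataproc.JobArgs`,
* forwarding each field to the Java builder and converting nested Kotlin arg types through
* their own `toJava()` implementations.
*/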
override fun toJava(): com.pulumi.gcp.dataproc.JobArgs = com.pulumi.gcp.dataproc.JobArgs.builder()
.forceDelete(forceDelete?.applyValue({ args0 -> args0 }))
.hadoopConfig(hadoopConfig?.applyValue({ args0 -> args0.let({ args0 -> args0.toJava() }) }))
.hiveConfig(hiveConfig?.applyValue({ args0 -> args0.let({ args0 -> args0.toJava() }) }))
.labels(labels?.applyValue({ args0 -> args0.map({ args0 -> args0.key.to(args0.value) }).toMap() }))
.pigConfig(pigConfig?.applyValue({ args0 -> args0.let({ args0 -> args0.toJava() }) }))
.placement(placement?.applyValue({ args0 -> args0.let({ args0 -> args0.toJava() }) }))
.prestoConfig(prestoConfig?.applyValue({ args0 -> args0.let({ args0 -> args0.toJava() }) }))
.project(project?.applyValue({ args0 -> args0 }))
.pysparkConfig(pysparkConfig?.applyValue({ args0 -> args0.let({ args0 -> args0.toJava() }) }))
.reference(reference?.applyValue({ args0 -> args0.let({ args0 -> args0.toJava() }) }))
.region(region?.applyValue({ args0 -> args0 }))
.scheduling(scheduling?.applyValue({ args0 -> args0.let({ args0 -> args0.toJava() }) }))
.sparkConfig(sparkConfig?.applyValue({ args0 -> args0.let({ args0 -> args0.toJava() }) }))
.sparksqlConfig(
sparksqlConfig?.applyValue({ args0 ->
args0.let({ args0 ->
args0.toJava()
})
}),
).build()
}
/**
* Builder for [JobArgs].
*/
@PulumiTagMarker
public class JobArgsBuilder internal constructor() {
private var forceDelete: Output<Boolean>? = null
private var hadoopConfig: Output<JobHadoopConfigArgs>? = null
private var hiveConfig: Output<JobHiveConfigArgs>? = null
private var labels: Output<Map<String, String>>? = null
private var pigConfig: Output<JobPigConfigArgs>? = null
private var placement: Output<JobPlacementArgs>? = null
private var prestoConfig: Output<JobPrestoConfigArgs>? = null
private var project: Output<String>? = null
private var pysparkConfig: Output<JobPysparkConfigArgs>? = null
private var reference: Output<JobReferenceArgs>? = null
private var region: Output<String>? = null
private var scheduling: Output<JobSchedulingArgs>? = null
private var sparkConfig: Output<JobSparkConfigArgs>? = null
private var sparksqlConfig: Output<JobSparksqlConfigArgs>? = null
/**
* @param value By default, you can only delete inactive jobs within
* Dataproc. Setting this to true, and calling destroy, will ensure that the
* job is first cancelled before issuing the delete.
*/
@JvmName("heubnsbkdlilrhvd")
public suspend fun forceDelete(`value`: Output<Boolean>) {
this.forceDelete = value
}
/**
* @param value The config of Hadoop job
*/
@JvmName("vputlerlmirmryya")
public suspend fun hadoopConfig(`value`: Output<JobHadoopConfigArgs>) {
this.hadoopConfig = value
}
/**
* @param value The config of hive job
*/
@JvmName("hqgsltqxihktqddd")
public suspend fun hiveConfig(`value`: Output<JobHiveConfigArgs>) {
this.hiveConfig = value
}
/**
* @param value The list of labels (key/value pairs) to add to the job.
* **Note**: This field is non-authoritative, and will only manage the labels present in your configuration.
* Please refer to the field 'effective_labels' for all of the labels present on the resource.
*/
@JvmName("dpwbfkxmukqdfbuj")
public suspend fun labels(`value`: Output<Map<String, String>>) {
this.labels = value
}
/**
* @param value The config of pig job.
*/
@JvmName("ehxgolucixsuomsi")
public suspend fun pigConfig(`value`: Output<JobPigConfigArgs>) {
this.pigConfig = value
}
/**
* @param value The config of job placement.
*/
@JvmName("psyiliewviehupus")
public suspend fun placement(`value`: Output<JobPlacementArgs>) {
this.placement = value
}
/**
* @param value The config of presto job
*/
@JvmName("ikbketoilxmhqyum")
public suspend fun prestoConfig(`value`: Output<JobPrestoConfigArgs>) {
this.prestoConfig = value
}
/**
* @param value The project in which the `cluster` can be found and jobs
* subsequently run against. If it is not provided, the provider project is used.
*/
@JvmName("flwgefvfurypoikj")
public suspend fun project(`value`: Output<String>) {
this.project = value
}
/**
* @param value The config of pySpark job.
*/
@JvmName("nmedjcljvavocfbc")
public suspend fun pysparkConfig(`value`: Output<JobPysparkConfigArgs>) {
this.pysparkConfig = value
}
/**
* @param value The reference of the job
*/
@JvmName("xeqgdbloesixfwej")
public suspend fun reference(`value`: Output<JobReferenceArgs>) {
this.reference = value
}
/**
* @param value The Cloud Dataproc region. This essentially determines which clusters are available
* for this job to be submitted to. If not specified, defaults to `global`.
*/
@JvmName("hgwtmhetkbkeocjh")
public suspend fun region(`value`: Output<String>) {
this.region = value
}
/**
* @param value Optional. Job scheduling configuration.
*/
@JvmName("vcfvgtasdlwxwhwd")
public suspend fun scheduling(`value`: Output<JobSchedulingArgs>) {
this.scheduling = value
}
/**
* @param value The config of the Spark job.
*/
@JvmName("neostwxqvtjattti")
public suspend fun sparkConfig(`value`: Output<JobSparkConfigArgs>) {
this.sparkConfig = value
}
/**
* @param value The config of SparkSql job
*/
@JvmName("bxarmrqdbosgvfbs")
public suspend fun sparksqlConfig(`value`: Output<JobSparksqlConfigArgs>) {
this.sparksqlConfig = value
}
/**
* @param value By default, you can only delete inactive jobs within
* Dataproc. Setting this to true, and calling destroy, will ensure that the
* job is first cancelled before issuing the delete.
*/
@JvmName("qbhbcelwiarykiuf")
public suspend fun forceDelete(`value`: Boolean?) {
val toBeMapped = value
val mapped = toBeMapped?.let({ args0 -> of(args0) })
this.forceDelete = mapped
}
/**
* @param value The config of Hadoop job
*/
@JvmName("yhctcuuwglvktcfn")
public suspend fun hadoopConfig(`value`: JobHadoopConfigArgs?) {
val toBeMapped = value
val mapped = toBeMapped?.let({ args0 -> of(args0) })
this.hadoopConfig = mapped
}
/**
* @param argument The config of Hadoop job
*/
@JvmName("socdpeaocgdopuwq")
public suspend fun hadoopConfig(argument: suspend JobHadoopConfigArgsBuilder.() -> Unit) {
val toBeMapped = JobHadoopConfigArgsBuilder().applySuspend { argument() }.build()
val mapped = of(toBeMapped)
this.hadoopConfig = mapped
}
/**
* @param value The config of hive job
*/
@JvmName("qsxggeeujqclggks")
public suspend fun hiveConfig(`value`: JobHiveConfigArgs?) {
val toBeMapped = value
val mapped = toBeMapped?.let({ args0 -> of(args0) })
this.hiveConfig = mapped
}
/**
* @param argument The config of hive job
*/
@JvmName("upjlawrtyavvapkk")
public suspend fun hiveConfig(argument: suspend JobHiveConfigArgsBuilder.() -> Unit) {
val toBeMapped = JobHiveConfigArgsBuilder().applySuspend { argument() }.build()
val mapped = of(toBeMapped)
this.hiveConfig = mapped
}
/**
* @param value The list of labels (key/value pairs) to add to the job.
* **Note**: This field is non-authoritative, and will only manage the labels present in your configuration.
* Please refer to the field 'effective_labels' for all of the labels present on the resource.
*/
@JvmName("gmcppyraqfagrppj")
public suspend fun labels(`value`: Map<String, String>?) {
val toBeMapped = value
val mapped = toBeMapped?.let({ args0 -> of(args0) })
this.labels = mapped
}
/**
* @param values The list of labels (key/value pairs) to add to the job.
* **Note**: This field is non-authoritative, and will only manage the labels present in your configuration.
* Please refer to the field 'effective_labels' for all of the labels present on the resource.
*/
@JvmName("xdelevhoyiiuorhl")
public fun labels(vararg values: Pair<String, String>) {
val toBeMapped = values.toMap()
val mapped = toBeMapped.let({ args0 -> of(args0) })
this.labels = mapped
}
/**
* @param value The config of pig job.
*/
@JvmName("eiporqwmttfphrkn")
public suspend fun pigConfig(`value`: JobPigConfigArgs?) {
val toBeMapped = value
val mapped = toBeMapped?.let({ args0 -> of(args0) })
this.pigConfig = mapped
}
/**
* @param argument The config of pig job.
*/
@JvmName("ofvavbdrsoepowcf")
public suspend fun pigConfig(argument: suspend JobPigConfigArgsBuilder.() -> Unit) {
val toBeMapped = JobPigConfigArgsBuilder().applySuspend { argument() }.build()
val mapped = of(toBeMapped)
this.pigConfig = mapped
}
/**
* @param value The config of job placement.
*/
@JvmName("lhgdxfvoddkchmir")
public suspend fun placement(`value`: JobPlacementArgs?) {
val toBeMapped = value
val mapped = toBeMapped?.let({ args0 -> of(args0) })
this.placement = mapped
}
/**
* @param argument The config of job placement.
*/
@JvmName("tfiquwqofyurkwyv")
public suspend fun placement(argument: suspend JobPlacementArgsBuilder.() -> Unit) {
val toBeMapped = JobPlacementArgsBuilder().applySuspend { argument() }.build()
val mapped = of(toBeMapped)
this.placement = mapped
}
/**
* @param value The config of presto job
*/
@JvmName("tddwkugxfojupars")
public suspend fun prestoConfig(`value`: JobPrestoConfigArgs?) {
val toBeMapped = value
val mapped = toBeMapped?.let({ args0 -> of(args0) })
this.prestoConfig = mapped
}
/**
* @param argument The config of presto job
*/
@JvmName("epiwhrputcvdvsds")
public suspend fun prestoConfig(argument: suspend JobPrestoConfigArgsBuilder.() -> Unit) {
val toBeMapped = JobPrestoConfigArgsBuilder().applySuspend { argument() }.build()
val mapped = of(toBeMapped)
this.prestoConfig = mapped
}
/**
* @param value The project in which the `cluster` can be found and jobs
* subsequently run against. If it is not provided, the provider project is used.
*/
@JvmName("ceqahuabbsvpexsk")
public suspend fun project(`value`: String?) {
val toBeMapped = value
val mapped = toBeMapped?.let({ args0 -> of(args0) })
this.project = mapped
}
/**
* @param value The config of pySpark job.
*/
@JvmName("oyusqipmkxsvwtgg")
public suspend fun pysparkConfig(`value`: JobPysparkConfigArgs?) {
val toBeMapped = value
val mapped = toBeMapped?.let({ args0 -> of(args0) })
this.pysparkConfig = mapped
}
/**
* @param argument The config of pySpark job.
*/
@JvmName("ymwrqdargrbnfraf")
public suspend fun pysparkConfig(argument: suspend JobPysparkConfigArgsBuilder.() -> Unit) {
val toBeMapped = JobPysparkConfigArgsBuilder().applySuspend { argument() }.build()
val mapped = of(toBeMapped)
this.pysparkConfig = mapped
}
/**
* @param value The reference of the job
*/
@JvmName("msaiowheteuffjsn")
public suspend fun reference(`value`: JobReferenceArgs?) {
val toBeMapped = value
val mapped = toBeMapped?.let({ args0 -> of(args0) })
this.reference = mapped
}
/**
* @param argument The reference of the job
*/
@JvmName("jyyfiqdduestdfbu")
public suspend fun reference(argument: suspend JobReferenceArgsBuilder.() -> Unit) {
val toBeMapped = JobReferenceArgsBuilder().applySuspend { argument() }.build()
val mapped = of(toBeMapped)
this.reference = mapped
}
/**
* @param value The Cloud Dataproc region. This essentially determines which clusters are available
* for this job to be submitted to. If not specified, defaults to `global`.
*/
@JvmName("jpmejovokreqyejw")
public suspend fun region(`value`: String?) {
val toBeMapped = value
val mapped = toBeMapped?.let({ args0 -> of(args0) })
this.region = mapped
}
/**
* @param value Optional. Job scheduling configuration.
*/
@JvmName("olaubfcsnojljirt")
public suspend fun scheduling(`value`: JobSchedulingArgs?) {
val toBeMapped = value
val mapped = toBeMapped?.let({ args0 -> of(args0) })
this.scheduling = mapped
}
/**
* @param argument Optional. Job scheduling configuration.
*/
@JvmName("whhweiqcshbkvqcc")
public suspend fun scheduling(argument: suspend JobSchedulingArgsBuilder.() -> Unit) {
val toBeMapped = JobSchedulingArgsBuilder().applySuspend { argument() }.build()
val mapped = of(toBeMapped)
this.scheduling = mapped
}
/**
* @param value The config of the Spark job.
*/
@JvmName("drbfwupivjupwonn")
public suspend fun sparkConfig(`value`: JobSparkConfigArgs?) {
val toBeMapped = value
val mapped = toBeMapped?.let({ args0 -> of(args0) })
this.sparkConfig = mapped
}
/**
* @param argument The config of the Spark job.
*/
@JvmName("uspsiqttkjrksena")
public suspend fun sparkConfig(argument: suspend JobSparkConfigArgsBuilder.() -> Unit) {
val toBeMapped = JobSparkConfigArgsBuilder().applySuspend { argument() }.build()
val mapped = of(toBeMapped)
this.sparkConfig = mapped
}
/**
* @param value The config of SparkSql job
*/
@JvmName("yvdjimcoxdepigug")
public suspend fun sparksqlConfig(`value`: JobSparksqlConfigArgs?) {
val toBeMapped = value
val mapped = toBeMapped?.let({ args0 -> of(args0) })
this.sparksqlConfig = mapped
}
/**
* @param argument The config of SparkSql job
*/
@JvmName("aojlefxuxttjgydk")
public suspend fun sparksqlConfig(argument: suspend JobSparksqlConfigArgsBuilder.() -> Unit) {
val toBeMapped = JobSparksqlConfigArgsBuilder().applySuspend { argument() }.build()
val mapped = of(toBeMapped)
this.sparksqlConfig = mapped
}
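/**
* Assembles the final [JobArgs] from the values supplied to this builder; fields that were
* never set remain null.
*/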
internal fun build(): JobArgs = JobArgs(
forceDelete = forceDelete,
hadoopConfig = hadoopConfig,
hiveConfig = hiveConfig,
labels = labels,
pigConfig = pigConfig,
placement = placement,
prestoConfig = prestoConfig,
project = project,
pysparkConfig = pysparkConfig,
reference = reference,
region = region,
scheduling = scheduling,
sparkConfig = sparkConfig,
sparksqlConfig = sparksqlConfig,
)
}