com.pulumi.gcp.dataproc.kotlin.Job.kt

@file:Suppress("NAME_SHADOWING", "DEPRECATION")

package com.pulumi.gcp.dataproc.kotlin

import com.pulumi.core.Output
import com.pulumi.gcp.dataproc.kotlin.outputs.JobHadoopConfig
import com.pulumi.gcp.dataproc.kotlin.outputs.JobHiveConfig
import com.pulumi.gcp.dataproc.kotlin.outputs.JobPigConfig
import com.pulumi.gcp.dataproc.kotlin.outputs.JobPlacement
import com.pulumi.gcp.dataproc.kotlin.outputs.JobPrestoConfig
import com.pulumi.gcp.dataproc.kotlin.outputs.JobPysparkConfig
import com.pulumi.gcp.dataproc.kotlin.outputs.JobReference
import com.pulumi.gcp.dataproc.kotlin.outputs.JobScheduling
import com.pulumi.gcp.dataproc.kotlin.outputs.JobSparkConfig
import com.pulumi.gcp.dataproc.kotlin.outputs.JobSparksqlConfig
import com.pulumi.gcp.dataproc.kotlin.outputs.JobStatus
import com.pulumi.kotlin.KotlinCustomResource
import com.pulumi.kotlin.PulumiTagMarker
import com.pulumi.kotlin.ResourceMapper
import com.pulumi.kotlin.options.CustomResourceOptions
import com.pulumi.kotlin.options.CustomResourceOptionsBuilder
import com.pulumi.resources.Resource
import kotlin.Boolean
import kotlin.String
import kotlin.Suppress
import kotlin.Unit
import kotlin.collections.List
import kotlin.collections.Map
import com.pulumi.gcp.dataproc.kotlin.outputs.JobHadoopConfig.Companion.toKotlin as jobHadoopConfigToKotlin
import com.pulumi.gcp.dataproc.kotlin.outputs.JobHiveConfig.Companion.toKotlin as jobHiveConfigToKotlin
import com.pulumi.gcp.dataproc.kotlin.outputs.JobPigConfig.Companion.toKotlin as jobPigConfigToKotlin
import com.pulumi.gcp.dataproc.kotlin.outputs.JobPlacement.Companion.toKotlin as jobPlacementToKotlin
import com.pulumi.gcp.dataproc.kotlin.outputs.JobPrestoConfig.Companion.toKotlin as jobPrestoConfigToKotlin
import com.pulumi.gcp.dataproc.kotlin.outputs.JobPysparkConfig.Companion.toKotlin as jobPysparkConfigToKotlin
import com.pulumi.gcp.dataproc.kotlin.outputs.JobReference.Companion.toKotlin as jobReferenceToKotlin
import com.pulumi.gcp.dataproc.kotlin.outputs.JobScheduling.Companion.toKotlin as jobSchedulingToKotlin
import com.pulumi.gcp.dataproc.kotlin.outputs.JobSparkConfig.Companion.toKotlin as jobSparkConfigToKotlin
import com.pulumi.gcp.dataproc.kotlin.outputs.JobSparksqlConfig.Companion.toKotlin as jobSparksqlConfigToKotlin
import com.pulumi.gcp.dataproc.kotlin.outputs.JobStatus.Companion.toKotlin as jobStatusToKotlin

/**
 * Builder for [Job].
 */
@PulumiTagMarker
public class JobResourceBuilder internal constructor() {
    public var name: String? = null

    public var args: JobArgs = JobArgs()

    public var opts: CustomResourceOptions = CustomResourceOptions()

    /**
     * @param name The _unique_ name of the resulting resource.
     */
    public fun name(`value`: String) {
        this.name = value
    }

    /**
     * @param block The arguments to use to populate this resource's properties.
     */
    public suspend fun args(block: suspend JobArgsBuilder.() -> Unit) {
        val builder = JobArgsBuilder()
        block(builder)
        this.args = builder.build()
    }

    /**
     * @param block A bag of options that control this resource's behavior.
     */
    public suspend fun opts(block: suspend CustomResourceOptionsBuilder.() -> Unit) {
        this.opts = com.pulumi.kotlin.options.CustomResourceOptions.opts(block)
    }

    internal fun build(): Job {
        val builtJavaResource = com.pulumi.gcp.dataproc.Job(
            this.name,
            this.args.toJava(),
            this.opts.toJava(),
        )
        return Job(builtJavaResource)
    }
}

/**
 * Manages a job resource within a Dataproc cluster on Google Compute Engine (GCE). For more information see
 * [the official dataproc documentation](https://cloud.google.com/dataproc/).
 * !> **Note:** This resource does not support 'update'; changing any attribute will cause the resource to be recreated.
 * ## Example Usage
 * 
 * ```typescript
 * import * as pulumi from "@pulumi/pulumi";
 * import * as gcp from "@pulumi/gcp";
 * const mycluster = new gcp.dataproc.Cluster("mycluster", {
 *     name: "dproc-cluster-unique-name",
 *     region: "us-central1",
 * });
 * // Submit an example spark job to a dataproc cluster
 * const spark = new gcp.dataproc.Job("spark", {
 *     region: mycluster.region,
 *     forceDelete: true,
 *     placement: {
 *         clusterName: mycluster.name,
 *     },
 *     sparkConfig: {
 *         mainClass: "org.apache.spark.examples.SparkPi",
 *         jarFileUris: ["file:///usr/lib/spark/examples/jars/spark-examples.jar"],
 *         args: ["1000"],
 *         properties: {
 *             "spark.logConf": "true",
 *         },
 *         loggingConfig: {
 *             driverLogLevels: {
 *                 root: "INFO",
 *             },
 *         },
 *     },
 * });
 * // Submit an example pyspark job to a dataproc cluster
 * const pyspark = new gcp.dataproc.Job("pyspark", {
 *     region: mycluster.region,
 *     forceDelete: true,
 *     placement: {
 *         clusterName: mycluster.name,
 *     },
 *     pysparkConfig: {
 *         mainPythonFileUri: "gs://dataproc-examples-2f10d78d114f6aaec76462e3c310f31f/src/pyspark/hello-world/hello-world.py",
 *         properties: {
 *             "spark.logConf": "true",
 *         },
 *     },
 * });
 * export const sparkStatus = spark.statuses.apply(statuses => statuses[0].state);
 * export const pysparkStatus = pyspark.statuses.apply(statuses => statuses[0].state);
 * ```
 * ```python
 * import pulumi
 * import pulumi_gcp as gcp
 * mycluster = gcp.dataproc.Cluster("mycluster",
 *     name="dproc-cluster-unique-name",
 *     region="us-central1")
 * # Submit an example spark job to a dataproc cluster
 * spark = gcp.dataproc.Job("spark",
 *     region=mycluster.region,
 *     force_delete=True,
 *     placement=gcp.dataproc.JobPlacementArgs(
 *         cluster_name=mycluster.name,
 *     ),
 *     spark_config=gcp.dataproc.JobSparkConfigArgs(
 *         main_class="org.apache.spark.examples.SparkPi",
 *         jar_file_uris=["file:///usr/lib/spark/examples/jars/spark-examples.jar"],
 *         args=["1000"],
 *         properties={
 *             "spark.logConf": "true",
 *         },
 *         logging_config=gcp.dataproc.JobSparkConfigLoggingConfigArgs(
 *             driver_log_levels={
 *                 "root": "INFO",
 *             },
 *         ),
 *     ))
 * # Submit an example pyspark job to a dataproc cluster
 * pyspark = gcp.dataproc.Job("pyspark",
 *     region=mycluster.region,
 *     force_delete=True,
 *     placement=gcp.dataproc.JobPlacementArgs(
 *         cluster_name=mycluster.name,
 *     ),
 *     pyspark_config=gcp.dataproc.JobPysparkConfigArgs(
 *         main_python_file_uri="gs://dataproc-examples-2f10d78d114f6aaec76462e3c310f31f/src/pyspark/hello-world/hello-world.py",
 *         properties={
 *             "spark.logConf": "true",
 *         },
 *     ))
 * pulumi.export("sparkStatus", spark.statuses[0].state)
 * pulumi.export("pysparkStatus", pyspark.statuses[0].state)
 * ```
 * ```csharp
 * using System.Collections.Generic;
 * using System.Linq;
 * using Pulumi;
 * using Gcp = Pulumi.Gcp;
 * return await Deployment.RunAsync(() =>
 * {
 *     var mycluster = new Gcp.Dataproc.Cluster("mycluster", new()
 *     {
 *         Name = "dproc-cluster-unique-name",
 *         Region = "us-central1",
 *     });
 *     // Submit an example spark job to a dataproc cluster
 *     var spark = new Gcp.Dataproc.Job("spark", new()
 *     {
 *         Region = mycluster.Region,
 *         ForceDelete = true,
 *         Placement = new Gcp.Dataproc.Inputs.JobPlacementArgs
 *         {
 *             ClusterName = mycluster.Name,
 *         },
 *         SparkConfig = new Gcp.Dataproc.Inputs.JobSparkConfigArgs
 *         {
 *             MainClass = "org.apache.spark.examples.SparkPi",
 *             JarFileUris = new[]
 *             {
 *                 "file:///usr/lib/spark/examples/jars/spark-examples.jar",
 *             },
 *             Args = new[]
 *             {
 *                 "1000",
 *             },
 *             Properties =
 *             {
 *                 { "spark.logConf", "true" },
 *             },
 *             LoggingConfig = new Gcp.Dataproc.Inputs.JobSparkConfigLoggingConfigArgs
 *             {
 *                 DriverLogLevels =
 *                 {
 *                     { "root", "INFO" },
 *                 },
 *             },
 *         },
 *     });
 *     // Submit an example pyspark job to a dataproc cluster
 *     var pyspark = new Gcp.Dataproc.Job("pyspark", new()
 *     {
 *         Region = mycluster.Region,
 *         ForceDelete = true,
 *         Placement = new Gcp.Dataproc.Inputs.JobPlacementArgs
 *         {
 *             ClusterName = mycluster.Name,
 *         },
 *         PysparkConfig = new Gcp.Dataproc.Inputs.JobPysparkConfigArgs
 *         {
 *             MainPythonFileUri = "gs://dataproc-examples-2f10d78d114f6aaec76462e3c310f31f/src/pyspark/hello-world/hello-world.py",
 *             Properties =
 *             {
 *                 { "spark.logConf", "true" },
 *             },
 *         },
 *     });
 *     return new Dictionary<string, object?>
 *     {
 *         ["sparkStatus"] = spark.Statuses.Apply(statuses => statuses[0].State),
 *         ["pysparkStatus"] = pyspark.Statuses.Apply(statuses => statuses[0].State),
 *     };
 * });
 * ```
 * ```go
 * package main
 * import (
 * 	"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/dataproc"
 * 	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
 * )
 * func main() {
 * 	pulumi.Run(func(ctx *pulumi.Context) error {
 * 		mycluster, err := dataproc.NewCluster(ctx, "mycluster", &dataproc.ClusterArgs{
 * 			Name:   pulumi.String("dproc-cluster-unique-name"),
 * 			Region: pulumi.String("us-central1"),
 * 		})
 * 		if err != nil {
 * 			return err
 * 		}
 * 		// Submit an example spark job to a dataproc cluster
 * 		spark, err := dataproc.NewJob(ctx, "spark", &dataproc.JobArgs{
 * 			Region:      mycluster.Region,
 * 			ForceDelete: pulumi.Bool(true),
 * 			Placement: &dataproc.JobPlacementArgs{
 * 				ClusterName: mycluster.Name,
 * 			},
 * 			SparkConfig: &dataproc.JobSparkConfigArgs{
 * 				MainClass: pulumi.String("org.apache.spark.examples.SparkPi"),
 * 				JarFileUris: pulumi.StringArray{
 * 					pulumi.String("file:///usr/lib/spark/examples/jars/spark-examples.jar"),
 * 				},
 * 				Args: pulumi.StringArray{
 * 					pulumi.String("1000"),
 * 				},
 * 				Properties: pulumi.StringMap{
 * 					"spark.logConf": pulumi.String("true"),
 * 				},
 * 				LoggingConfig: &dataproc.JobSparkConfigLoggingConfigArgs{
 * 					DriverLogLevels: pulumi.StringMap{
 * 						"root": pulumi.String("INFO"),
 * 					},
 * 				},
 * 			},
 * 		})
 * 		if err != nil {
 * 			return err
 * 		}
 * 		// Submit an example pyspark job to a dataproc cluster
 * 		pyspark, err := dataproc.NewJob(ctx, "pyspark", &dataproc.JobArgs{
 * 			Region:      mycluster.Region,
 * 			ForceDelete: pulumi.Bool(true),
 * 			Placement: &dataproc.JobPlacementArgs{
 * 				ClusterName: mycluster.Name,
 * 			},
 * 			PysparkConfig: &dataproc.JobPysparkConfigArgs{
 * 				MainPythonFileUri: pulumi.String("gs://dataproc-examples-2f10d78d114f6aaec76462e3c310f31f/src/pyspark/hello-world/hello-world.py"),
 * 				Properties: pulumi.StringMap{
 * 					"spark.logConf": pulumi.String("true"),
 * 				},
 * 			},
 * 		})
 * 		if err != nil {
 * 			return err
 * 		}
 * 		ctx.Export("sparkStatus", spark.Statuses.ApplyT(func(statuses []dataproc.JobStatus) (*string, error) {
 * 			return &statuses[0].State, nil
 * 		}).(pulumi.StringPtrOutput))
 * 		ctx.Export("pysparkStatus", pyspark.Statuses.ApplyT(func(statuses []dataproc.JobStatus) (*string, error) {
 * 			return &statuses[0].State, nil
 * 		}).(pulumi.StringPtrOutput))
 * 		return nil
 * 	})
 * }
 * ```
 * ```java
 * package generated_program;
 * import com.pulumi.Context;
 * import com.pulumi.Pulumi;
 * import com.pulumi.core.Output;
 * import com.pulumi.gcp.dataproc.Cluster;
 * import com.pulumi.gcp.dataproc.ClusterArgs;
 * import com.pulumi.gcp.dataproc.Job;
 * import com.pulumi.gcp.dataproc.JobArgs;
 * import com.pulumi.gcp.dataproc.inputs.JobPlacementArgs;
 * import com.pulumi.gcp.dataproc.inputs.JobSparkConfigArgs;
 * import com.pulumi.gcp.dataproc.inputs.JobSparkConfigLoggingConfigArgs;
 * import com.pulumi.gcp.dataproc.inputs.JobPysparkConfigArgs;
 * import java.util.List;
 * import java.util.ArrayList;
 * import java.util.Map;
 * import java.io.File;
 * import java.nio.file.Files;
 * import java.nio.file.Paths;
 * public class App {
 *     public static void main(String[] args) {
 *         Pulumi.run(App::stack);
 *     }
 *     public static void stack(Context ctx) {
 *         var mycluster = new Cluster("mycluster", ClusterArgs.builder()
 *             .name("dproc-cluster-unique-name")
 *             .region("us-central1")
 *             .build());
 *         // Submit an example spark job to a dataproc cluster
 *         var spark = new Job("spark", JobArgs.builder()
 *             .region(mycluster.region())
 *             .forceDelete(true)
 *             .placement(JobPlacementArgs.builder()
 *                 .clusterName(mycluster.name())
 *                 .build())
 *             .sparkConfig(JobSparkConfigArgs.builder()
 *                 .mainClass("org.apache.spark.examples.SparkPi")
 *                 .jarFileUris("file:///usr/lib/spark/examples/jars/spark-examples.jar")
 *                 .args("1000")
 *                 .properties(Map.of("spark.logConf", "true"))
 *                 .loggingConfig(JobSparkConfigLoggingConfigArgs.builder()
 *                     .driverLogLevels(Map.of("root", "INFO"))
 *                     .build())
 *                 .build())
 *             .build());
 *         // Submit an example pyspark job to a dataproc cluster
 *         var pyspark = new Job("pyspark", JobArgs.builder()
 *             .region(mycluster.region())
 *             .forceDelete(true)
 *             .placement(JobPlacementArgs.builder()
 *                 .clusterName(mycluster.name())
 *                 .build())
 *             .pysparkConfig(JobPysparkConfigArgs.builder()
 *                 .mainPythonFileUri("gs://dataproc-examples-2f10d78d114f6aaec76462e3c310f31f/src/pyspark/hello-world/hello-world.py")
 *                 .properties(Map.of("spark.logConf", "true"))
 *                 .build())
 *             .build());
 *         ctx.export("sparkStatus", spark.statuses().applyValue(statuses -> statuses.get(0).state()));
 *         ctx.export("pysparkStatus", pyspark.statuses().applyValue(statuses -> statuses.get(0).state()));
 *     }
 * }
 * ```
 * ```yaml
 * resources:
 *   mycluster:
 *     type: gcp:dataproc:Cluster
 *     properties:
 *       name: dproc-cluster-unique-name
 *       region: us-central1
 *   # Submit an example spark job to a dataproc cluster
 *   spark:
 *     type: gcp:dataproc:Job
 *     properties:
 *       region: ${mycluster.region}
 *       forceDelete: true
 *       placement:
 *         clusterName: ${mycluster.name}
 *       sparkConfig:
 *         mainClass: org.apache.spark.examples.SparkPi
 *         jarFileUris:
 *           - file:///usr/lib/spark/examples/jars/spark-examples.jar
 *         args:
 *           - '1000'
 *         properties:
 *           spark.logConf: 'true'
 *         loggingConfig:
 *           driverLogLevels:
 *             root: INFO
 *   # Submit an example pyspark job to a dataproc cluster
 *   pyspark:
 *     type: gcp:dataproc:Job
 *     properties:
 *       region: ${mycluster.region}
 *       forceDelete: true
 *       placement:
 *         clusterName: ${mycluster.name}
 *       pysparkConfig:
 *         mainPythonFileUri: gs://dataproc-examples-2f10d78d114f6aaec76462e3c310f31f/src/pyspark/hello-world/hello-world.py
 *         properties:
 *           spark.logConf: 'true'
 * outputs:
 *   # Check out current state of the jobs
 *   sparkStatus: ${spark.statuses[0].state}
 *   pysparkStatus: ${pyspark.statuses[0].state}
 * ```
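 * The same Spark job can be sketched with this SDK's Kotlin DSL. The `job` entry point
 * and its `args` block are defined in this file; `Pulumi.run` and the nested builder
 * methods (`placement`, `sparkConfig`, and their fields) are assumed to mirror the
 * generated `JobArgsBuilder` and may differ slightly between SDK versions.
 * ```kotlin
 * import com.pulumi.gcp.dataproc.kotlin.job
 * import com.pulumi.kotlin.Pulumi
 * fun main() {
 *     Pulumi.run {
 *         // Assumes a Dataproc cluster named "dproc-cluster-unique-name" already
 *         // exists in us-central1 (see the cluster definitions in the examples above).
 *         val spark = job("spark") {
 *             args {
 *                 region("us-central1")
 *                 forceDelete(true)
 *                 placement {
 *                     clusterName("dproc-cluster-unique-name")
 *                 }
 *                 sparkConfig {
 *                     mainClass("org.apache.spark.examples.SparkPi")
 *                     jarFileUris("file:///usr/lib/spark/examples/jars/spark-examples.jar")
 *                     args("1000")
 *                     properties(mapOf("spark.logConf" to "true"))
 *                 }
 *             }
 *         }
 *     }
 * }
 * ```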
 * 
 * ## Import
 * This resource does not support import.
 */
public class Job internal constructor(
    override val javaResource: com.pulumi.gcp.dataproc.Job,
) : KotlinCustomResource(javaResource, JobMapper) {
    /**
     * If present, the location of miscellaneous control files which may be used as part of job setup and handling. If not present, control files may be placed in the same location as driver_output_uri.
     */
    public val driverControlsFilesUri: Output<String>
        get() = javaResource.driverControlsFilesUri().applyValue({ args0 -> args0 })

    /**
     * A URI pointing to the location of the stdout of the job's driver program.
     */
    public val driverOutputResourceUri: Output<String>
        get() = javaResource.driverOutputResourceUri().applyValue({ args0 -> args0 })

    /**
     * All of the labels (key/value pairs) present on the resource in GCP, including the labels configured through Pulumi, other clients, and services.
     * * `scheduling.max_failures_per_hour` - (Required) Maximum number of times per hour a driver may be restarted as a result of driver exiting with non-zero code before job is reported failed.
     * * `scheduling.max_failures_total` - (Required) Maximum number of times in total a driver may be restarted as a result of driver exiting with non-zero code before job is reported failed.
     */
    public val effectiveLabels: Output<Map<String, String>>
        get() = javaResource.effectiveLabels().applyValue({ args0 ->
            args0.map({ args0 ->
                args0.key.to(args0.value)
            }).toMap()
        })

    /**
     * By default, you can only delete inactive jobs within
     * Dataproc. Setting this to true, and calling destroy, will ensure that the
     * job is first cancelled before issuing the delete.
     */
    public val forceDelete: Output<Boolean>?
        get() = javaResource.forceDelete().applyValue({ args0 ->
            args0.map({ args0 ->
                args0
            }).orElse(null)
        })

    /**
     * The config of the Hadoop job.
     */
    public val hadoopConfig: Output<JobHadoopConfig>?
        get() = javaResource.hadoopConfig().applyValue({ args0 ->
            args0.map({ args0 ->
                args0.let({ args0 ->
                    jobHadoopConfigToKotlin(args0)
                })
            }).orElse(null)
        })

    /**
     * The config of the Hive job.
     */
    public val hiveConfig: Output<JobHiveConfig>?
        get() = javaResource.hiveConfig().applyValue({ args0 ->
            args0.map({ args0 ->
                args0.let({ args0 ->
                    jobHiveConfigToKotlin(args0)
                })
            }).orElse(null)
        })

    /**
     * The list of labels (key/value pairs) to add to the job.
     * **Note**: This field is non-authoritative, and will only manage the labels present in your configuration.
     * Please refer to the field 'effective_labels' for all of the labels present on the resource.
     */
    public val labels: Output<Map<String, String>>?
        get() = javaResource.labels().applyValue({ args0 ->
            args0.map({ args0 ->
                args0.map({ args0 ->
                    args0.key.to(args0.value)
                }).toMap()
            }).orElse(null)
        })

    /**
     * The config of the Pig job.
     */
    public val pigConfig: Output<JobPigConfig>?
        get() = javaResource.pigConfig().applyValue({ args0 ->
            args0.map({ args0 ->
                args0.let({ args0 ->
                    jobPigConfigToKotlin(args0)
                })
            }).orElse(null)
        })

    /**
     * The config of job placement.
     */
    public val placement: Output<JobPlacement>
        get() = javaResource.placement().applyValue({ args0 ->
            args0.let({ args0 ->
                jobPlacementToKotlin(args0)
            })
        })

    /**
     * The config of the Presto job.
     */
    public val prestoConfig: Output<JobPrestoConfig>?
        get() = javaResource.prestoConfig().applyValue({ args0 ->
            args0.map({ args0 ->
                args0.let({ args0 ->
                    jobPrestoConfigToKotlin(args0)
                })
            }).orElse(null)
        })

    /**
     * The project in which the `cluster` can be found and jobs
     * subsequently run against. If it is not provided, the provider project is used.
     */
    public val project: Output<String>
        get() = javaResource.project().applyValue({ args0 -> args0 })

    /**
     * The combination of labels configured directly on the resource and default labels configured on the provider.
     */
    public val pulumiLabels: Output<Map<String, String>>
        get() = javaResource.pulumiLabels().applyValue({ args0 ->
            args0.map({ args0 ->
                args0.key.to(args0.value)
            }).toMap()
        })

    /**
     * The config of the PySpark job.
     */
    public val pysparkConfig: Output<JobPysparkConfig>?
        get() = javaResource.pysparkConfig().applyValue({ args0 ->
            args0.map({ args0 ->
                args0.let({ args0 -> jobPysparkConfigToKotlin(args0) })
            }).orElse(null)
        })

    /**
     * The reference of the job
     */
    public val reference: Output<JobReference>
        get() = javaResource.reference().applyValue({ args0 ->
            args0.let({ args0 ->
                jobReferenceToKotlin(args0)
            })
        })

    /**
     * The Cloud Dataproc region. This essentially determines which clusters are available
     * for this job to be submitted to. If not specified, defaults to `global`.
     */
    public val region: Output<String>?
        get() = javaResource.region().applyValue({ args0 -> args0.map({ args0 -> args0 }).orElse(null) })

    /**
     * Optional. Job scheduling configuration.
     */
    public val scheduling: Output<JobScheduling>?
        get() = javaResource.scheduling().applyValue({ args0 ->
            args0.map({ args0 ->
                args0.let({ args0 ->
                    jobSchedulingToKotlin(args0)
                })
            }).orElse(null)
        })

    /**
     * The config of the Spark job.
     */
    public val sparkConfig: Output<JobSparkConfig>?
        get() = javaResource.sparkConfig().applyValue({ args0 ->
            args0.map({ args0 ->
                args0.let({ args0 ->
                    jobSparkConfigToKotlin(args0)
                })
            }).orElse(null)
        })

    /**
     * The config of the Spark SQL job.
     */
    public val sparksqlConfig: Output<JobSparksqlConfig>?
        get() = javaResource.sparksqlConfig().applyValue({ args0 ->
            args0.map({ args0 ->
                args0.let({ args0 -> jobSparksqlConfigToKotlin(args0) })
            }).orElse(null)
        })

    /**
     * The status of the job.
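     *
     * A sketch of reading the latest driver state, assuming [JobStatus] exposes a
     * `state` field as in the examples above:
     * ```kotlin
     * // `spark` is a Job resource; `applyValue` transforms the resolved status list.
     * val sparkState = spark.statuses.applyValue { it.first().state }
     * ```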
     */
    public val statuses: Output<List<JobStatus>>
        get() = javaResource.statuses().applyValue({ args0 ->
            args0.map({ args0 ->
                args0.let({ args0 ->
                    jobStatusToKotlin(args0)
                })
            })
        })
}

public object JobMapper : ResourceMapper<Job> {
    override fun supportsMappingOfType(javaResource: Resource): Boolean =
        com.pulumi.gcp.dataproc.Job::class == javaResource::class

    override fun map(javaResource: Resource): Job = Job(javaResource as com.pulumi.gcp.dataproc.Job)
}

/**
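 * A minimal usage sketch. The `opts` block and its `protect` setter are assumed from
 * [CustomResourceOptionsBuilder] and may differ between SDK versions:
 * ```kotlin
 * val spark = job("spark") {
 *     args {
 *         region("us-central1")
 *     }
 *     opts {
 *         protect(true)
 *     }
 * }
 * ```
 *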
 * @see [Job].
 * @param name The _unique_ name of the resulting resource.
 * @param block Builder for [Job].
 */
public suspend fun job(name: String, block: suspend JobResourceBuilder.() -> Unit): Job {
    val builder = JobResourceBuilder()
    builder.name(name)
    block(builder)
    return builder.build()
}

/**
 * @see [Job].
 * @param name The _unique_ name of the resulting resource.
 */
public fun job(name: String): Job {
    val builder = JobResourceBuilder()
    builder.name(name)
    return builder.build()
}



