@file:Suppress("NAME_SHADOWING", "DEPRECATION")
package com.pulumi.gcp.dataproc.kotlin
import com.pulumi.core.Output
import com.pulumi.core.Output.of
import com.pulumi.gcp.dataproc.JobArgs.builder
import com.pulumi.gcp.dataproc.kotlin.inputs.JobHadoopConfigArgs
import com.pulumi.gcp.dataproc.kotlin.inputs.JobHadoopConfigArgsBuilder
import com.pulumi.gcp.dataproc.kotlin.inputs.JobHiveConfigArgs
import com.pulumi.gcp.dataproc.kotlin.inputs.JobHiveConfigArgsBuilder
import com.pulumi.gcp.dataproc.kotlin.inputs.JobPigConfigArgs
import com.pulumi.gcp.dataproc.kotlin.inputs.JobPigConfigArgsBuilder
import com.pulumi.gcp.dataproc.kotlin.inputs.JobPlacementArgs
import com.pulumi.gcp.dataproc.kotlin.inputs.JobPlacementArgsBuilder
import com.pulumi.gcp.dataproc.kotlin.inputs.JobPrestoConfigArgs
import com.pulumi.gcp.dataproc.kotlin.inputs.JobPrestoConfigArgsBuilder
import com.pulumi.gcp.dataproc.kotlin.inputs.JobPysparkConfigArgs
import com.pulumi.gcp.dataproc.kotlin.inputs.JobPysparkConfigArgsBuilder
import com.pulumi.gcp.dataproc.kotlin.inputs.JobReferenceArgs
import com.pulumi.gcp.dataproc.kotlin.inputs.JobReferenceArgsBuilder
import com.pulumi.gcp.dataproc.kotlin.inputs.JobSchedulingArgs
import com.pulumi.gcp.dataproc.kotlin.inputs.JobSchedulingArgsBuilder
import com.pulumi.gcp.dataproc.kotlin.inputs.JobSparkConfigArgs
import com.pulumi.gcp.dataproc.kotlin.inputs.JobSparkConfigArgsBuilder
import com.pulumi.gcp.dataproc.kotlin.inputs.JobSparksqlConfigArgs
import com.pulumi.gcp.dataproc.kotlin.inputs.JobSparksqlConfigArgsBuilder
import com.pulumi.kotlin.ConvertibleToJava
import com.pulumi.kotlin.PulumiTagMarker
import com.pulumi.kotlin.applySuspend
import kotlin.Boolean
import kotlin.Pair
import kotlin.String
import kotlin.Suppress
import kotlin.Unit
import kotlin.collections.Map
import kotlin.jvm.JvmName
/**
 * Manages a job resource within a Dataproc cluster running on Google Compute Engine (GCE). For more information, see
 * [the official Dataproc documentation](https://cloud.google.com/dataproc/).
 * !> **Note:** This resource does not support 'update'; changing any attribute will cause the resource to be recreated.
* ## Example Usage
*
* ```typescript
* import * as pulumi from "@pulumi/pulumi";
* import * as gcp from "@pulumi/gcp";
* const mycluster = new gcp.dataproc.Cluster("mycluster", {
* name: "dproc-cluster-unique-name",
* region: "us-central1",
* });
* // Submit an example spark job to a dataproc cluster
* const spark = new gcp.dataproc.Job("spark", {
* region: mycluster.region,
* forceDelete: true,
* placement: {
* clusterName: mycluster.name,
* },
* sparkConfig: {
* mainClass: "org.apache.spark.examples.SparkPi",
* jarFileUris: ["file:///usr/lib/spark/examples/jars/spark-examples.jar"],
* args: ["1000"],
* properties: {
* "spark.logConf": "true",
* },
* loggingConfig: {
* driverLogLevels: {
* root: "INFO",
* },
* },
* },
* });
* // Submit an example pyspark job to a dataproc cluster
* const pyspark = new gcp.dataproc.Job("pyspark", {
* region: mycluster.region,
* forceDelete: true,
* placement: {
* clusterName: mycluster.name,
* },
* pysparkConfig: {
* mainPythonFileUri: "gs://dataproc-examples-2f10d78d114f6aaec76462e3c310f31f/src/pyspark/hello-world/hello-world.py",
* properties: {
* "spark.logConf": "true",
* },
* },
* });
* export const sparkStatus = spark.statuses.apply(statuses => statuses[0].state);
* export const pysparkStatus = pyspark.statuses.apply(statuses => statuses[0].state);
* ```
* ```python
* import pulumi
* import pulumi_gcp as gcp
* mycluster = gcp.dataproc.Cluster("mycluster",
* name="dproc-cluster-unique-name",
* region="us-central1")
* # Submit an example spark job to a dataproc cluster
* spark = gcp.dataproc.Job("spark",
* region=mycluster.region,
* force_delete=True,
* placement=gcp.dataproc.JobPlacementArgs(
* cluster_name=mycluster.name,
* ),
* spark_config=gcp.dataproc.JobSparkConfigArgs(
* main_class="org.apache.spark.examples.SparkPi",
* jar_file_uris=["file:///usr/lib/spark/examples/jars/spark-examples.jar"],
* args=["1000"],
* properties={
* "spark.logConf": "true",
* },
* logging_config=gcp.dataproc.JobSparkConfigLoggingConfigArgs(
* driver_log_levels={
* "root": "INFO",
* },
* ),
* ))
* # Submit an example pyspark job to a dataproc cluster
* pyspark = gcp.dataproc.Job("pyspark",
* region=mycluster.region,
* force_delete=True,
* placement=gcp.dataproc.JobPlacementArgs(
* cluster_name=mycluster.name,
* ),
* pyspark_config=gcp.dataproc.JobPysparkConfigArgs(
* main_python_file_uri="gs://dataproc-examples-2f10d78d114f6aaec76462e3c310f31f/src/pyspark/hello-world/hello-world.py",
* properties={
* "spark.logConf": "true",
* },
* ))
* pulumi.export("sparkStatus", spark.statuses[0].state)
* pulumi.export("pysparkStatus", pyspark.statuses[0].state)
* ```
* ```csharp
* using System.Collections.Generic;
* using System.Linq;
* using Pulumi;
* using Gcp = Pulumi.Gcp;
* return await Deployment.RunAsync(() =>
* {
* var mycluster = new Gcp.Dataproc.Cluster("mycluster", new()
* {
* Name = "dproc-cluster-unique-name",
* Region = "us-central1",
* });
* // Submit an example spark job to a dataproc cluster
* var spark = new Gcp.Dataproc.Job("spark", new()
* {
* Region = mycluster.Region,
* ForceDelete = true,
* Placement = new Gcp.Dataproc.Inputs.JobPlacementArgs
* {
* ClusterName = mycluster.Name,
* },
* SparkConfig = new Gcp.Dataproc.Inputs.JobSparkConfigArgs
* {
* MainClass = "org.apache.spark.examples.SparkPi",
* JarFileUris = new[]
* {
* "file:///usr/lib/spark/examples/jars/spark-examples.jar",
* },
* Args = new[]
* {
* "1000",
* },
* Properties =
* {
* { "spark.logConf", "true" },
* },
* LoggingConfig = new Gcp.Dataproc.Inputs.JobSparkConfigLoggingConfigArgs
* {
* DriverLogLevels =
* {
* { "root", "INFO" },
* },
* },
* },
* });
* // Submit an example pyspark job to a dataproc cluster
* var pyspark = new Gcp.Dataproc.Job("pyspark", new()
* {
* Region = mycluster.Region,
* ForceDelete = true,
* Placement = new Gcp.Dataproc.Inputs.JobPlacementArgs
* {
* ClusterName = mycluster.Name,
* },
* PysparkConfig = new Gcp.Dataproc.Inputs.JobPysparkConfigArgs
* {
* MainPythonFileUri = "gs://dataproc-examples-2f10d78d114f6aaec76462e3c310f31f/src/pyspark/hello-world/hello-world.py",
* Properties =
* {
* { "spark.logConf", "true" },
* },
* },
* });
 * return new Dictionary<string, object?>
* {
* ["sparkStatus"] = spark.Statuses.Apply(statuses => statuses[0].State),
* ["pysparkStatus"] = pyspark.Statuses.Apply(statuses => statuses[0].State),
* };
* });
* ```
* ```go
* package main
* import (
* "github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/dataproc"
* "github.com/pulumi/pulumi/sdk/v3/go/pulumi"
* )
* func main() {
* pulumi.Run(func(ctx *pulumi.Context) error {
* mycluster, err := dataproc.NewCluster(ctx, "mycluster", &dataproc.ClusterArgs{
* Name: pulumi.String("dproc-cluster-unique-name"),
* Region: pulumi.String("us-central1"),
* })
* if err != nil {
* return err
* }
* // Submit an example spark job to a dataproc cluster
* spark, err := dataproc.NewJob(ctx, "spark", &dataproc.JobArgs{
* Region: mycluster.Region,
* ForceDelete: pulumi.Bool(true),
* Placement: &dataproc.JobPlacementArgs{
* ClusterName: mycluster.Name,
* },
* SparkConfig: &dataproc.JobSparkConfigArgs{
* MainClass: pulumi.String("org.apache.spark.examples.SparkPi"),
* JarFileUris: pulumi.StringArray{
* pulumi.String("file:///usr/lib/spark/examples/jars/spark-examples.jar"),
* },
* Args: pulumi.StringArray{
* pulumi.String("1000"),
* },
* Properties: pulumi.StringMap{
* "spark.logConf": pulumi.String("true"),
* },
* LoggingConfig: &dataproc.JobSparkConfigLoggingConfigArgs{
* DriverLogLevels: pulumi.StringMap{
* "root": pulumi.String("INFO"),
* },
* },
* },
* })
* if err != nil {
* return err
* }
* // Submit an example pyspark job to a dataproc cluster
* pyspark, err := dataproc.NewJob(ctx, "pyspark", &dataproc.JobArgs{
* Region: mycluster.Region,
* ForceDelete: pulumi.Bool(true),
* Placement: &dataproc.JobPlacementArgs{
* ClusterName: mycluster.Name,
* },
* PysparkConfig: &dataproc.JobPysparkConfigArgs{
* MainPythonFileUri: pulumi.String("gs://dataproc-examples-2f10d78d114f6aaec76462e3c310f31f/src/pyspark/hello-world/hello-world.py"),
* Properties: pulumi.StringMap{
* "spark.logConf": pulumi.String("true"),
* },
* },
* })
* if err != nil {
* return err
* }
* ctx.Export("sparkStatus", spark.Statuses.ApplyT(func(statuses []dataproc.JobStatus) (*string, error) {
* return &statuses[0].State, nil
* }).(pulumi.StringPtrOutput))
* ctx.Export("pysparkStatus", pyspark.Statuses.ApplyT(func(statuses []dataproc.JobStatus) (*string, error) {
* return &statuses[0].State, nil
* }).(pulumi.StringPtrOutput))
* return nil
* })
* }
* ```
* ```java
* package generated_program;
* import com.pulumi.Context;
* import com.pulumi.Pulumi;
* import com.pulumi.core.Output;
* import com.pulumi.gcp.dataproc.Cluster;
* import com.pulumi.gcp.dataproc.ClusterArgs;
* import com.pulumi.gcp.dataproc.Job;
* import com.pulumi.gcp.dataproc.JobArgs;
* import com.pulumi.gcp.dataproc.inputs.JobPlacementArgs;
* import com.pulumi.gcp.dataproc.inputs.JobSparkConfigArgs;
* import com.pulumi.gcp.dataproc.inputs.JobSparkConfigLoggingConfigArgs;
* import com.pulumi.gcp.dataproc.inputs.JobPysparkConfigArgs;
* import java.util.List;
* import java.util.ArrayList;
* import java.util.Map;
* import java.io.File;
* import java.nio.file.Files;
* import java.nio.file.Paths;
* public class App {
* public static void main(String[] args) {
* Pulumi.run(App::stack);
* }
* public static void stack(Context ctx) {
* var mycluster = new Cluster("mycluster", ClusterArgs.builder()
* .name("dproc-cluster-unique-name")
* .region("us-central1")
* .build());
* // Submit an example spark job to a dataproc cluster
* var spark = new Job("spark", JobArgs.builder()
* .region(mycluster.region())
* .forceDelete(true)
* .placement(JobPlacementArgs.builder()
* .clusterName(mycluster.name())
* .build())
* .sparkConfig(JobSparkConfigArgs.builder()
* .mainClass("org.apache.spark.examples.SparkPi")
* .jarFileUris("file:///usr/lib/spark/examples/jars/spark-examples.jar")
* .args("1000")
* .properties(Map.of("spark.logConf", "true"))
* .loggingConfig(JobSparkConfigLoggingConfigArgs.builder()
* .driverLogLevels(Map.of("root", "INFO"))
* .build())
* .build())
* .build());
* // Submit an example pyspark job to a dataproc cluster
* var pyspark = new Job("pyspark", JobArgs.builder()
* .region(mycluster.region())
* .forceDelete(true)
* .placement(JobPlacementArgs.builder()
* .clusterName(mycluster.name())
* .build())
* .pysparkConfig(JobPysparkConfigArgs.builder()
* .mainPythonFileUri("gs://dataproc-examples-2f10d78d114f6aaec76462e3c310f31f/src/pyspark/hello-world/hello-world.py")
* .properties(Map.of("spark.logConf", "true"))
* .build())
* .build());
* ctx.export("sparkStatus", spark.statuses().applyValue(statuses -> statuses[0].state()));
* ctx.export("pysparkStatus", pyspark.statuses().applyValue(statuses -> statuses[0].state()));
* }
* }
* ```
* ```yaml
 * resources:
 *   mycluster:
 *     type: gcp:dataproc:Cluster
 *     properties:
 *       name: dproc-cluster-unique-name
 *       region: us-central1
 *   # Submit an example spark job to a dataproc cluster
 *   spark:
 *     type: gcp:dataproc:Job
 *     properties:
 *       region: ${mycluster.region}
 *       forceDelete: true
 *       placement:
 *         clusterName: ${mycluster.name}
 *       sparkConfig:
 *         mainClass: org.apache.spark.examples.SparkPi
 *         jarFileUris:
 *           - file:///usr/lib/spark/examples/jars/spark-examples.jar
 *         args:
 *           - '1000'
 *         properties:
 *           spark.logConf: 'true'
 *         loggingConfig:
 *           driverLogLevels:
 *             root: INFO
 *   # Submit an example pyspark job to a dataproc cluster
 *   pyspark:
 *     type: gcp:dataproc:Job
 *     properties:
 *       region: ${mycluster.region}
 *       forceDelete: true
 *       placement:
 *         clusterName: ${mycluster.name}
 *       pysparkConfig:
 *         mainPythonFileUri: gs://dataproc-examples-2f10d78d114f6aaec76462e3c310f31f/src/pyspark/hello-world/hello-world.py
 *         properties:
 *           spark.logConf: 'true'
 * outputs:
 *   # Check out current state of the jobs
 *   sparkStatus: ${spark.statuses[0].state}
 *   pysparkStatus: ${pyspark.statuses[0].state}
* ```
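 *
 * The examples above come from the upstream provider documentation and do not include Kotlin. As a minimal sketch only (not an upstream example), the same Spark job arguments can be assembled directly through the `JobArgs` data-class constructor defined in this file, wrapping plain values with `Output.of`. It assumes the nested input types in `com.pulumi.gcp.dataproc.kotlin.inputs` follow the same nullable, `Output`-wrapped constructor pattern as `JobArgs` itself; the cluster name mirrors the one used in the examples above.
 * ```kotlin
 * import com.pulumi.core.Output
 * import com.pulumi.gcp.dataproc.kotlin.JobArgs
 * import com.pulumi.gcp.dataproc.kotlin.inputs.JobPlacementArgs
 * import com.pulumi.gcp.dataproc.kotlin.inputs.JobSparkConfigArgs
 *
 * // Sketch: build the Spark job arguments shown above as a JobArgs value.
 * // Output.of(...) lifts plain Kotlin values into Pulumi outputs; every
 * // constructor parameter defaults to null, so only the needed fields are set.
 * // Assumes the nested input data classes expose the same Output-wrapped parameters.
 * val sparkJobArgs = JobArgs(
 *     region = Output.of("us-central1"),
 *     forceDelete = Output.of(true),
 *     placement = Output.of(
 *         JobPlacementArgs(clusterName = Output.of("dproc-cluster-unique-name")),
 *     ),
 *     sparkConfig = Output.of(
 *         JobSparkConfigArgs(
 *             mainClass = Output.of("org.apache.spark.examples.SparkPi"),
 *             jarFileUris = Output.of(listOf("file:///usr/lib/spark/examples/jars/spark-examples.jar")),
 *             args = Output.of(listOf("1000")),
 *             properties = Output.of(mapOf("spark.logConf" to "true")),
 *         ),
 *     ),
 * )
 * ```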
*
* ## Import
* This resource does not support import.
* @property forceDelete By default, you can only delete inactive jobs within
* Dataproc. Setting this to true, and calling destroy, will ensure that the
* job is first cancelled before issuing the delete.
 * @property hadoopConfig The config of the Hadoop job.
 * @property hiveConfig The config of the Hive job.
* @property labels The list of labels (key/value pairs) to add to the job.
* **Note**: This field is non-authoritative, and will only manage the labels present in your configuration.
* Please refer to the field 'effective_labels' for all of the labels present on the resource.
 * @property pigConfig The config of the Pig job.
 * @property placement The config of job placement.
 * @property prestoConfig The config of the Presto job.
* @property project The project in which the `cluster` can be found and jobs
* subsequently run against. If it is not provided, the provider project is used.
 * @property pysparkConfig The config of the PySpark job.
 * @property reference The reference of the job.
* @property region The Cloud Dataproc region. This essentially determines which clusters are available
* for this job to be submitted to. If not specified, defaults to `global`.
* @property scheduling Optional. Job scheduling configuration.
* @property sparkConfig The config of the Spark job.
 * @property sparksqlConfig The config of the Spark SQL job.
*/
public data class JobArgs(
    public val forceDelete: Output<Boolean>? = null,
    public val hadoopConfig: Output<JobHadoopConfigArgs>? = null,
    public val hiveConfig: Output<JobHiveConfigArgs>? = null,
    public val labels: Output<Map<String, String>>? = null,
    public val pigConfig: Output<JobPigConfigArgs>? = null,
    public val placement: Output<JobPlacementArgs>? = null,
    public val prestoConfig: Output<JobPrestoConfigArgs>? = null,
    public val project: Output<String>? = null,
    public val pysparkConfig: Output<JobPysparkConfigArgs>? = null,
    public val reference: Output<JobReferenceArgs>? = null,
    public val region: Output<String>? = null,
    public val scheduling: Output<JobSchedulingArgs>? = null,
    public val sparkConfig: Output<JobSparkConfigArgs>? = null,
    public val sparksqlConfig: Output<JobSparksqlConfigArgs>? = null,
)