com.pulumi.gcp.dataflow.kotlin.Pipeline.kt

Build cloud applications and infrastructure by combining the safety and reliability of infrastructure as code with the power of the Kotlin programming language.
@file:Suppress("NAME_SHADOWING", "DEPRECATION")

package com.pulumi.gcp.dataflow.kotlin

import com.pulumi.core.Output
import com.pulumi.gcp.dataflow.kotlin.outputs.PipelineScheduleInfo
import com.pulumi.gcp.dataflow.kotlin.outputs.PipelineWorkload
import com.pulumi.kotlin.KotlinCustomResource
import com.pulumi.kotlin.PulumiTagMarker
import com.pulumi.kotlin.ResourceMapper
import com.pulumi.kotlin.options.CustomResourceOptions
import com.pulumi.kotlin.options.CustomResourceOptionsBuilder
import com.pulumi.resources.Resource
import kotlin.Boolean
import kotlin.Int
import kotlin.String
import kotlin.Suppress
import kotlin.Unit
import kotlin.collections.Map
import com.pulumi.gcp.dataflow.kotlin.outputs.PipelineScheduleInfo.Companion.toKotlin as pipelineScheduleInfoToKotlin
import com.pulumi.gcp.dataflow.kotlin.outputs.PipelineWorkload.Companion.toKotlin as pipelineWorkloadToKotlin

/**
 * Builder for [Pipeline].
 */
@PulumiTagMarker
public class PipelineResourceBuilder internal constructor() {
    public var name: String? = null

    public var args: PipelineArgs = PipelineArgs()

    public var opts: CustomResourceOptions = CustomResourceOptions()

    /**
     * @param name The _unique_ name of the resulting resource.
     */
    public fun name(`value`: String) {
        this.name = value
    }

    /**
     * @param block The arguments to use to populate this resource's properties.
     */
    public suspend fun args(block: suspend PipelineArgsBuilder.() -> Unit) {
        val builder = PipelineArgsBuilder()
        block(builder)
        this.args = builder.build()
    }

    /**
     * @param block A bag of options that control this resource's behavior.
     */
    public suspend fun opts(block: suspend CustomResourceOptionsBuilder.() -> Unit) {
        this.opts = com.pulumi.kotlin.options.CustomResourceOptions.opts(block)
    }

    internal fun build(): Pipeline {
        val builtJavaResource = com.pulumi.gcp.dataflow.Pipeline(
            this.name,
            this.args.toJava(),
            this.opts.toJava(),
        )
        return Pipeline(builtJavaResource)
    }
}

/**
 *  /* /* /* /* /* /*
 * The main pipeline entity and all the necessary metadata for launching and managing linked jobs.
 * To get more information about Pipeline, see:
 * * [API documentation](https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines)
 * * How-to Guides
 *     * [Official Documentation](https://cloud.google.com/dataflow)
 * ## Example Usage
 * ### Data Pipeline Pipeline
 * 
 * ```typescript
 * import * as pulumi from "@pulumi/pulumi";
 * import * as gcp from "@pulumi/gcp";
 * const serviceAccount = new gcp.serviceaccount.Account("service_account", {
 *     accountId: "my-account",
 *     displayName: "Service Account",
 * });
 * const primary = new gcp.dataflow.Pipeline("primary", {
 *     name: "my-pipeline",
 *     displayName: "my-pipeline",
 *     type: "PIPELINE_TYPE_BATCH",
 *     state: "STATE_ACTIVE",
 *     region: "us-central1",
 *     workload: {
 *         dataflowLaunchTemplateRequest: {
 *             projectId: "my-project",
 *             gcsPath: "gs://my-bucket/path",
 *             launchParameters: {
 *                 jobName: "my-job",
 *                 parameters: {
 *                     name: "wrench",
 *                 },
 *                 environment: {
 *                     numWorkers: 5,
 *                     maxWorkers: 5,
 *                     zone: "us-central1-a",
 *                     serviceAccountEmail: serviceAccount.email,
 *                     network: "default",
 *                     tempLocation: "gs://my-bucket/tmp_dir",
 *                     bypassTempDirValidation: false,
 *                     machineType: "E2",
 *                     additionalUserLabels: {
 *                         context: "test",
 *                     },
 *                     workerRegion: "us-central1",
 *                     workerZone: "us-central1-a",
 *                     enableStreamingEngine: false,
 *                 },
 *                 update: false,
 *                 transformNameMapping: {
 *                     name: "wrench",
 *                 },
 *             },
 *             location: "us-central1",
 *         },
 *     },
 *     scheduleInfo: {
 *         schedule: "* */2 * * *",
 *     },
 * });
 * ```
 * ```python
 * import pulumi
 * import pulumi_gcp as gcp
 * service_account = gcp.serviceaccount.Account("service_account",
 *     account_id="my-account",
 *     display_name="Service Account")
 * primary = gcp.dataflow.Pipeline("primary",
 *     name="my-pipeline",
 *     display_name="my-pipeline",
 *     type="PIPELINE_TYPE_BATCH",
 *     state="STATE_ACTIVE",
 *     region="us-central1",
 *     workload=gcp.dataflow.PipelineWorkloadArgs(
 *         dataflow_launch_template_request=gcp.dataflow.PipelineWorkloadDataflowLaunchTemplateRequestArgs(
 *             project_id="my-project",
 *             gcs_path="gs://my-bucket/path",
 *             launch_parameters=gcp.dataflow.PipelineWorkloadDataflowLaunchTemplateRequestLaunchParametersArgs(
 *                 job_name="my-job",
 *                 parameters={
 *                     "name": "wrench",
 *                 },
 *                 environment=gcp.dataflow.PipelineWorkloadDataflowLaunchTemplateRequestLaunchParametersEnvironmentArgs(
 *                     num_workers=5,
 *                     max_workers=5,
 *                     zone="us-central1-a",
 *                     service_account_email=service_account.email,
 *                     network="default",
 *                     temp_location="gs://my-bucket/tmp_dir",
 *                     bypass_temp_dir_validation=False,
 *                     machine_type="E2",
 *                     additional_user_labels={
 *                         "context": "test",
 *                     },
 *                     worker_region="us-central1",
 *                     worker_zone="us-central1-a",
 *                     enable_streaming_engine=False,
 *                 ),
 *                 update=False,
 *                 transform_name_mapping={
 *                     "name": "wrench",
 *                 },
 *             ),
 *             location="us-central1",
 *         ),
 *     ),
 *     schedule_info=gcp.dataflow.PipelineScheduleInfoArgs(
 *         schedule="* */2 * * *",
 *     ))
 * ```
 * ```csharp
 * using System.Collections.Generic;
 * using System.Linq;
 * using Pulumi;
 * using Gcp = Pulumi.Gcp;
 * return await Deployment.RunAsync(() =>
 * {
 *     var serviceAccount = new Gcp.ServiceAccount.Account("service_account", new()
 *     {
 *         AccountId = "my-account",
 *         DisplayName = "Service Account",
 *     });
 *     var primary = new Gcp.Dataflow.Pipeline("primary", new()
 *     {
 *         Name = "my-pipeline",
 *         DisplayName = "my-pipeline",
 *         Type = "PIPELINE_TYPE_BATCH",
 *         State = "STATE_ACTIVE",
 *         Region = "us-central1",
 *         Workload = new Gcp.Dataflow.Inputs.PipelineWorkloadArgs
 *         {
 *             DataflowLaunchTemplateRequest = new Gcp.Dataflow.Inputs.PipelineWorkloadDataflowLaunchTemplateRequestArgs
 *             {
 *                 ProjectId = "my-project",
 *                 GcsPath = "gs://my-bucket/path",
 *                 LaunchParameters = new Gcp.Dataflow.Inputs.PipelineWorkloadDataflowLaunchTemplateRequestLaunchParametersArgs
 *                 {
 *                     JobName = "my-job",
 *                     Parameters =
 *                     {
 *                         { "name", "wrench" },
 *                     },
 *                     Environment = new Gcp.Dataflow.Inputs.PipelineWorkloadDataflowLaunchTemplateRequestLaunchParametersEnvironmentArgs
 *                     {
 *                         NumWorkers = 5,
 *                         MaxWorkers = 5,
 *                         Zone = "us-central1-a",
 *                         ServiceAccountEmail = serviceAccount.Email,
 *                         Network = "default",
 *                         TempLocation = "gs://my-bucket/tmp_dir",
 *                         BypassTempDirValidation = false,
 *                         MachineType = "E2",
 *                         AdditionalUserLabels =
 *                         {
 *                             { "context", "test" },
 *                         },
 *                         WorkerRegion = "us-central1",
 *                         WorkerZone = "us-central1-a",
 *                         EnableStreamingEngine = false,
 *                     },
 *                     Update = false,
 *                     TransformNameMapping =
 *                     {
 *                         { "name", "wrench" },
 *                     },
 *                 },
 *                 Location = "us-central1",
 *             },
 *         },
 *         ScheduleInfo = new Gcp.Dataflow.Inputs.PipelineScheduleInfoArgs
 *         {
 *             Schedule = "* */2 * * *",
 *         },
 *     });
 * });
 * ```
 * ```go
 * package main
 * import (
 * 	"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/dataflow"
 * 	"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/serviceaccount"
 * 	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
 * )
 * func main() {
 * 	pulumi.Run(func(ctx *pulumi.Context) error {
 * 		serviceAccount, err := serviceaccount.NewAccount(ctx, "service_account", &serviceaccount.AccountArgs{
 * 			AccountId:   pulumi.String("my-account"),
 * 			DisplayName: pulumi.String("Service Account"),
 * 		})
 * 		if err != nil {
 * 			return err
 * 		}
 * 		_, err = dataflow.NewPipeline(ctx, "primary", &dataflow.PipelineArgs{
 * 			Name:        pulumi.String("my-pipeline"),
 * 			DisplayName: pulumi.String("my-pipeline"),
 * 			Type:        pulumi.String("PIPELINE_TYPE_BATCH"),
 * 			State:       pulumi.String("STATE_ACTIVE"),
 * 			Region:      pulumi.String("us-central1"),
 * 			Workload: &dataflow.PipelineWorkloadArgs{
 * 				DataflowLaunchTemplateRequest: &dataflow.PipelineWorkloadDataflowLaunchTemplateRequestArgs{
 * 					ProjectId: pulumi.String("my-project"),
 * 					GcsPath:   pulumi.String("gs://my-bucket/path"),
 * 					LaunchParameters: &dataflow.PipelineWorkloadDataflowLaunchTemplateRequestLaunchParametersArgs{
 * 						JobName: pulumi.String("my-job"),
 * 						Parameters: pulumi.StringMap{
 * 							"name": pulumi.String("wrench"),
 * 						},
 * 						Environment: &dataflow.PipelineWorkloadDataflowLaunchTemplateRequestLaunchParametersEnvironmentArgs{
 * 							NumWorkers:              pulumi.Int(5),
 * 							MaxWorkers:              pulumi.Int(5),
 * 							Zone:                    pulumi.String("us-central1-a"),
 * 							ServiceAccountEmail:     serviceAccount.Email,
 * 							Network:                 pulumi.String("default"),
 * 							TempLocation:            pulumi.String("gs://my-bucket/tmp_dir"),
 * 							BypassTempDirValidation: pulumi.Bool(false),
 * 							MachineType:             pulumi.String("E2"),
 * 							AdditionalUserLabels: pulumi.StringMap{
 * 								"context": pulumi.String("test"),
 * 							},
 * 							WorkerRegion:          pulumi.String("us-central1"),
 * 							WorkerZone:            pulumi.String("us-central1-a"),
 * 							EnableStreamingEngine: pulumi.Bool(false),
 * 						},
 * 						Update: pulumi.Bool(false),
 * 						TransformNameMapping: pulumi.StringMap{
 * 							"name": pulumi.String("wrench"),
 * 						},
 * 					},
 * 					Location: pulumi.String("us-central1"),
 * 				},
 * 			},
 * 			ScheduleInfo: &dataflow.PipelineScheduleInfoArgs{
 * 				Schedule: pulumi.String("* */2 * * *"),
 * 			},
 * 		})
 * 		if err != nil {
 * 			return err
 * 		}
 * 		return nil
 * 	})
 * }
 * ```
 * ```java
 * package generated_program;
 * import com.pulumi.Context;
 * import com.pulumi.Pulumi;
 * import com.pulumi.core.Output;
 * import com.pulumi.gcp.serviceaccount.Account;
 * import com.pulumi.gcp.serviceaccount.AccountArgs;
 * import com.pulumi.gcp.dataflow.Pipeline;
 * import com.pulumi.gcp.dataflow.PipelineArgs;
 * import com.pulumi.gcp.dataflow.inputs.PipelineWorkloadArgs;
 * import com.pulumi.gcp.dataflow.inputs.PipelineWorkloadDataflowLaunchTemplateRequestArgs;
 * import com.pulumi.gcp.dataflow.inputs.PipelineWorkloadDataflowLaunchTemplateRequestLaunchParametersArgs;
 * import com.pulumi.gcp.dataflow.inputs.PipelineWorkloadDataflowLaunchTemplateRequestLaunchParametersEnvironmentArgs;
 * import com.pulumi.gcp.dataflow.inputs.PipelineScheduleInfoArgs;
 * import java.util.List;
 * import java.util.ArrayList;
 * import java.util.Map;
 * import java.io.File;
 * import java.nio.file.Files;
 * import java.nio.file.Paths;
 * public class App {
 *     public static void main(String[] args) {
 *         Pulumi.run(App::stack);
 *     }
 *     public static void stack(Context ctx) {
 *         var serviceAccount = new Account("serviceAccount", AccountArgs.builder()
 *             .accountId("my-account")
 *             .displayName("Service Account")
 *             .build());
 *         var primary = new Pipeline("primary", PipelineArgs.builder()
 *             .name("my-pipeline")
 *             .displayName("my-pipeline")
 *             .type("PIPELINE_TYPE_BATCH")
 *             .state("STATE_ACTIVE")
 *             .region("us-central1")
 *             .workload(PipelineWorkloadArgs.builder()
 *                 .dataflowLaunchTemplateRequest(PipelineWorkloadDataflowLaunchTemplateRequestArgs.builder()
 *                     .projectId("my-project")
 *                     .gcsPath("gs://my-bucket/path")
 *                     .launchParameters(PipelineWorkloadDataflowLaunchTemplateRequestLaunchParametersArgs.builder()
 *                         .jobName("my-job")
 *                         .parameters(Map.of("name", "wrench"))
 *                         .environment(PipelineWorkloadDataflowLaunchTemplateRequestLaunchParametersEnvironmentArgs.builder()
 *                             .numWorkers(5)
 *                             .maxWorkers(5)
 *                             .zone("us-central1-a")
 *                             .serviceAccountEmail(serviceAccount.email())
 *                             .network("default")
 *                             .tempLocation("gs://my-bucket/tmp_dir")
 *                             .bypassTempDirValidation(false)
 *                             .machineType("E2")
 *                             .additionalUserLabels(Map.of("context", "test"))
 *                             .workerRegion("us-central1")
 *                             .workerZone("us-central1-a")
 *                             .enableStreamingEngine(false)
 *                             .build())
 *                         .update(false)
 *                         .transformNameMapping(Map.of("name", "wrench"))
 *                         .build())
 *                     .location("us-central1")
 *                     .build())
 *                 .build())
 *             .scheduleInfo(PipelineScheduleInfoArgs.builder()
 *                 .schedule("* */2 * * *")
 *                 .build())
 *             .build());
 *     }
 * }
 * ```
 * ```yaml
 * resources:
 *   serviceAccount:
 *     type: gcp:serviceaccount:Account
 *     name: service_account
 *     properties:
 *       accountId: my-account
 *       displayName: Service Account
 *   primary:
 *     type: gcp:dataflow:Pipeline
 *     properties:
 *       name: my-pipeline
 *       displayName: my-pipeline
 *       type: PIPELINE_TYPE_BATCH
 *       state: STATE_ACTIVE
 *       region: us-central1
 *       workload:
 *         dataflowLaunchTemplateRequest:
 *           projectId: my-project
 *           gcsPath: gs://my-bucket/path
 *           launchParameters:
 *             jobName: my-job
 *             parameters:
 *               name: wrench
 *             environment:
 *               numWorkers: 5
 *               maxWorkers: 5
 *               zone: us-central1-a
 *               serviceAccountEmail: ${serviceAccount.email}
 *               network: default
 *               tempLocation: gs://my-bucket/tmp_dir
 *               bypassTempDirValidation: false
 *               machineType: E2
 *               additionalUserLabels:
 *                 context: test
 *               workerRegion: us-central1
 *               workerZone: us-central1-a
 *               enableStreamingEngine: false
 *             update: false
 *             transformNameMapping:
 *               name: wrench
 *           location: us-central1
 *       scheduleInfo:
 *         schedule: '* */2 * * *'
 * ```
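 * For users of this Kotlin SDK, a minimal sketch of the same resource declared through the `pipeline`
 * DSL defined in this file. The `args` block comes from [PipelineResourceBuilder]; the individual
 * property setters on [PipelineArgsBuilder] (for example `displayName` and `scheduleInfo`) are assumed
 * to mirror the fields shown in the examples above.
 * ```kotlin
 * import com.pulumi.gcp.dataflow.kotlin.pipeline
 *
 * // Inside a Pulumi Kotlin program (entrypoint omitted); `pipeline` is a suspend function.
 * suspend fun createPrimaryPipeline() = pipeline("primary") {
 *     args {
 *         name("my-pipeline")
 *         displayName("my-pipeline")
 *         type("PIPELINE_TYPE_BATCH")
 *         state("STATE_ACTIVE")
 *         region("us-central1")
 *         scheduleInfo {
 *             schedule("0 2 * * *") // example schedule: daily at 02:00
 *         }
 *     }
 * }
 * ```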
 * 
 * ## Import
 * Pipeline can be imported using any of these accepted formats:
 * * `projects/{{project}}/locations/{{region}}/pipelines/{{name}}`
 * * `{{project}}/{{region}}/{{name}}`
 * * `{{region}}/{{name}}`
 * * `{{name}}`
 * When using the `pulumi import` command, Pipeline can be imported using one of the formats above. For example:
 * ```sh
 * $ pulumi import gcp:dataflow/pipeline:Pipeline default projects/{{project}}/locations/{{region}}/pipelines/{{name}}
 * ```
 * ```sh
 * $ pulumi import gcp:dataflow/pipeline:Pipeline default {{project}}/{{region}}/{{name}}
 * ```
 * ```sh
 * $ pulumi import gcp:dataflow/pipeline:Pipeline default {{region}}/{{name}}
 * ```
 * ```sh
 * $ pulumi import gcp:dataflow/pipeline:Pipeline default {{name}}
 * ```
 */
public class Pipeline internal constructor(
    override val javaResource: com.pulumi.gcp.dataflow.Pipeline,
) : KotlinCustomResource(javaResource, PipelineMapper) {
    /**
     * The timestamp when the pipeline was initially created. Set by the Data Pipelines service.
     * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
     */
    public val createTime: Output<String>
        get() = javaResource.createTime().applyValue({ args0 -> args0 })

    /**
     * The display name of the pipeline. It can contain only letters ([A-Za-z]), numbers ([0-9]), hyphens (-), and underscores (_).
     */
    public val displayName: Output<String>?
        get() = javaResource.displayName().applyValue({ args0 ->
            args0.map({ args0 ->
                args0
            }).orElse(null)
        })

    /**
     * Number of jobs.
     */
    public val jobCount: Output<Int>
        get() = javaResource.jobCount().applyValue({ args0 -> args0 })

    /**
     * The timestamp when the pipeline was last modified. Set by the Data Pipelines service.
     * A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".
     */
    public val lastUpdateTime: Output<String>
        get() = javaResource.lastUpdateTime().applyValue({ args0 -> args0 })

    /**
     * The pipeline name. For example: projects/PROJECT_ID/locations/LOCATION_ID/pipelines/PIPELINE_ID.
     * - PROJECT_ID can contain letters ([A-Za-z]), numbers ([0-9]), hyphens (-), colons (:), and periods (.). For more information, see Identifying projects.
     * - LOCATION_ID is the canonical ID for the pipeline's location. The list of available locations can be obtained by calling google.cloud.location.Locations.ListLocations. Note that the Data Pipelines service is not available in all regions. It depends on Cloud Scheduler, an App Engine application, so it's only available in App Engine regions.
     * - PIPELINE_ID is the ID of the pipeline. Must be unique for the selected project and location.
     */
    public val name: Output<String>
        get() = javaResource.name().applyValue({ args0 -> args0 })

    /**
     * The sources of the pipeline (for example, Dataplex). The keys and values are set by the corresponding sources during pipeline creation.
     * An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
     */
    public val pipelineSources: Output<Map<String, String>>?
        get() = javaResource.pipelineSources().applyValue({ args0 ->
            args0.map({ args0 ->
                args0.map({ args0 -> args0.key.to(args0.value) }).toMap()
            }).orElse(null)
        })

    /**
     * The ID of the project in which the resource belongs.
     * If it is not provided, the provider project is used.
     */
    public val project: Output<String>
        get() = javaResource.project().applyValue({ args0 -> args0 })

    /**
     * A reference to the region
     */
    public val region: Output<String>?
        get() = javaResource.region().applyValue({ args0 -> args0.map({ args0 -> args0 }).orElse(null) })

    /**
     * Internal scheduling information for a pipeline. If this information is provided, periodic jobs will be created per the schedule. If not, users are responsible for creating jobs externally.
     * https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#schedulespec
     * Structure is documented below.
     */
    public val scheduleInfo: Output<PipelineScheduleInfo>?
        get() = javaResource.scheduleInfo().applyValue({ args0 ->
            args0.map({ args0 ->
                args0.let({ args0 ->
                    pipelineScheduleInfoToKotlin(args0)
                })
            }).orElse(null)
        })

    /**
     * Optional. A service account email to be used with the Cloud Scheduler job. If not specified, the default compute engine service account will be used.
     */
    public val schedulerServiceAccountEmail: Output<String>
        get() = javaResource.schedulerServiceAccountEmail().applyValue({ args0 -> args0 })

    /**
     * The state of the pipeline. When the pipeline is created, the state is set to 'PIPELINE_STATE_ACTIVE' by default. State changes can be requested by setting the state to stopping, paused, or resuming. State cannot be changed through pipelines.patch requests.
     * https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#state
     * Possible values are: `STATE_UNSPECIFIED`, `STATE_RESUMING`, `STATE_ACTIVE`, `STATE_STOPPING`, `STATE_ARCHIVED`, `STATE_PAUSED`.
     * - - -
     */
    public val state: Output<String>
        get() = javaResource.state().applyValue({ args0 -> args0 })

    /**
     * The type of the pipeline. This field affects the scheduling of the pipeline and the type of metrics to show for the pipeline.
     * https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#pipelinetype
     * Possible values are: `PIPELINE_TYPE_UNSPECIFIED`, `PIPELINE_TYPE_BATCH`, `PIPELINE_TYPE_STREAMING`.
     */
    public val type: Output<String>
        get() = javaResource.type().applyValue({ args0 -> args0 })

    /**
     * Workload information for creating new jobs.
     * https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#workload
     * Structure is documented below.
     */
    public val workload: Output<PipelineWorkload>?
        get() = javaResource.workload().applyValue({ args0 ->
            args0.map({ args0 ->
                args0.let({ args0 ->
                    pipelineWorkloadToKotlin(args0)
                })
            }).orElse(null)
        })
}

public object PipelineMapper : ResourceMapper<Pipeline> {
    override fun supportsMappingOfType(javaResource: Resource): Boolean =
        com.pulumi.gcp.dataflow.Pipeline::class == javaResource::class

    override fun map(javaResource: Resource): Pipeline = Pipeline(
        javaResource as
            com.pulumi.gcp.dataflow.Pipeline,
    )
}

/**
 * @see [Pipeline].
 * @param name The _unique_ name of the resulting resource.
 * @param block Builder for [Pipeline].
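 * Example (a sketch; the property setters on [PipelineArgsBuilder] and the `protect` option on
 * [CustomResourceOptionsBuilder] are assumed, not guaranteed by this file):
 * ```kotlin
 * // Within a suspend context (for example, inside the Pulumi program block):
 * val primary = pipeline("primary") {
 *     args {
 *         displayName("my-pipeline")
 *         type("PIPELINE_TYPE_BATCH")
 *         region("us-central1")
 *     }
 *     opts {
 *         protect(true) // assumed option name; guards the pipeline against accidental deletion
 *     }
 * }
 * ```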
 */
public suspend fun pipeline(name: String, block: suspend PipelineResourceBuilder.() -> Unit): Pipeline {
    val builder = PipelineResourceBuilder()
    builder.name(name)
    block(builder)
    return builder.build()
}

/**
 * @see [Pipeline].
 * @param name The _unique_ name of the resulting resource.
 */
public fun pipeline(name: String): Pipeline {
    val builder = PipelineResourceBuilder()
    builder.name(name)
    return builder.build()
}



