com.pulumi.gcp.dataflow.kotlin.PipelineArgs.kt

@file:Suppress("NAME_SHADOWING", "DEPRECATION")

package com.pulumi.gcp.dataflow.kotlin

import com.pulumi.core.Output
import com.pulumi.core.Output.of
import com.pulumi.gcp.dataflow.PipelineArgs.builder
import com.pulumi.gcp.dataflow.kotlin.inputs.PipelineScheduleInfoArgs
import com.pulumi.gcp.dataflow.kotlin.inputs.PipelineScheduleInfoArgsBuilder
import com.pulumi.gcp.dataflow.kotlin.inputs.PipelineWorkloadArgs
import com.pulumi.gcp.dataflow.kotlin.inputs.PipelineWorkloadArgsBuilder
import com.pulumi.kotlin.ConvertibleToJava
import com.pulumi.kotlin.PulumiTagMarker
import com.pulumi.kotlin.applySuspend
import kotlin.Pair
import kotlin.String
import kotlin.Suppress
import kotlin.Unit
import kotlin.collections.Map
import kotlin.jvm.JvmName

/**
 * The main pipeline entity and all the necessary metadata for launching and managing linked jobs.
 * To get more information about Pipeline, see:
 * * [API documentation](https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines)
 * * How-to Guides
 *     * [Official Documentation](https://cloud.google.com/dataflow)
 * ## Example Usage
 * ### Data Pipeline Pipeline
 * 
 * ```typescript
 * import * as pulumi from "@pulumi/pulumi";
 * import * as gcp from "@pulumi/gcp";
 * const serviceAccount = new gcp.serviceaccount.Account("service_account", {
 *     accountId: "my-account",
 *     displayName: "Service Account",
 * });
 * const primary = new gcp.dataflow.Pipeline("primary", {
 *     name: "my-pipeline",
 *     displayName: "my-pipeline",
 *     type: "PIPELINE_TYPE_BATCH",
 *     state: "STATE_ACTIVE",
 *     region: "us-central1",
 *     workload: {
 *         dataflowLaunchTemplateRequest: {
 *             projectId: "my-project",
 *             gcsPath: "gs://my-bucket/path",
 *             launchParameters: {
 *                 jobName: "my-job",
 *                 parameters: {
 *                     name: "wrench",
 *                 },
 *                 environment: {
 *                     numWorkers: 5,
 *                     maxWorkers: 5,
 *                     zone: "us-central1-a",
 *                     serviceAccountEmail: serviceAccount.email,
 *                     network: "default",
 *                     tempLocation: "gs://my-bucket/tmp_dir",
 *                     bypassTempDirValidation: false,
 *                     machineType: "E2",
 *                     additionalUserLabels: {
 *                         context: "test",
 *                     },
 *                     workerRegion: "us-central1",
 *                     workerZone: "us-central1-a",
 *                     enableStreamingEngine: false,
 *                 },
 *                 update: false,
 *                 transformNameMapping: {
 *                     name: "wrench",
 *                 },
 *             },
 *             location: "us-central1",
 *         },
 *     },
 *     scheduleInfo: {
 *         schedule: "* */2 * * *",
 *     },
 * });
 * ```
 * ```python
 * import pulumi
 * import pulumi_gcp as gcp
 * service_account = gcp.serviceaccount.Account("service_account",
 *     account_id="my-account",
 *     display_name="Service Account")
 * primary = gcp.dataflow.Pipeline("primary",
 *     name="my-pipeline",
 *     display_name="my-pipeline",
 *     type="PIPELINE_TYPE_BATCH",
 *     state="STATE_ACTIVE",
 *     region="us-central1",
 *     workload={
 *         "dataflow_launch_template_request": {
 *             "project_id": "my-project",
 *             "gcs_path": "gs://my-bucket/path",
 *             "launch_parameters": {
 *                 "job_name": "my-job",
 *                 "parameters": {
 *                     "name": "wrench",
 *                 },
 *                 "environment": {
 *                     "num_workers": 5,
 *                     "max_workers": 5,
 *                     "zone": "us-central1-a",
 *                     "service_account_email": service_account.email,
 *                     "network": "default",
 *                     "temp_location": "gs://my-bucket/tmp_dir",
 *                     "bypass_temp_dir_validation": False,
 *                     "machine_type": "E2",
 *                     "additional_user_labels": {
 *                         "context": "test",
 *                     },
 *                     "worker_region": "us-central1",
 *                     "worker_zone": "us-central1-a",
 *                     "enable_streaming_engine": False,
 *                 },
 *                 "update": False,
 *                 "transform_name_mapping": {
 *                     "name": "wrench",
 *                 },
 *             },
 *             "location": "us-central1",
 *         },
 *     },
 *     schedule_info={
 *         "schedule": "* */2 * * *",
 *     })
 * ```
 * ```csharp
 * using System.Collections.Generic;
 * using System.Linq;
 * using Pulumi;
 * using Gcp = Pulumi.Gcp;
 * return await Deployment.RunAsync(() =>
 * {
 *     var serviceAccount = new Gcp.ServiceAccount.Account("service_account", new()
 *     {
 *         AccountId = "my-account",
 *         DisplayName = "Service Account",
 *     });
 *     var primary = new Gcp.Dataflow.Pipeline("primary", new()
 *     {
 *         Name = "my-pipeline",
 *         DisplayName = "my-pipeline",
 *         Type = "PIPELINE_TYPE_BATCH",
 *         State = "STATE_ACTIVE",
 *         Region = "us-central1",
 *         Workload = new Gcp.Dataflow.Inputs.PipelineWorkloadArgs
 *         {
 *             DataflowLaunchTemplateRequest = new Gcp.Dataflow.Inputs.PipelineWorkloadDataflowLaunchTemplateRequestArgs
 *             {
 *                 ProjectId = "my-project",
 *                 GcsPath = "gs://my-bucket/path",
 *                 LaunchParameters = new Gcp.Dataflow.Inputs.PipelineWorkloadDataflowLaunchTemplateRequestLaunchParametersArgs
 *                 {
 *                     JobName = "my-job",
 *                     Parameters =
 *                     {
 *                         { "name", "wrench" },
 *                     },
 *                     Environment = new Gcp.Dataflow.Inputs.PipelineWorkloadDataflowLaunchTemplateRequestLaunchParametersEnvironmentArgs
 *                     {
 *                         NumWorkers = 5,
 *                         MaxWorkers = 5,
 *                         Zone = "us-central1-a",
 *                         ServiceAccountEmail = serviceAccount.Email,
 *                         Network = "default",
 *                         TempLocation = "gs://my-bucket/tmp_dir",
 *                         BypassTempDirValidation = false,
 *                         MachineType = "E2",
 *                         AdditionalUserLabels =
 *                         {
 *                             { "context", "test" },
 *                         },
 *                         WorkerRegion = "us-central1",
 *                         WorkerZone = "us-central1-a",
 *                         EnableStreamingEngine = false,
 *                     },
 *                     Update = false,
 *                     TransformNameMapping =
 *                     {
 *                         { "name", "wrench" },
 *                     },
 *                 },
 *                 Location = "us-central1",
 *             },
 *         },
 *         ScheduleInfo = new Gcp.Dataflow.Inputs.PipelineScheduleInfoArgs
 *         {
 *             Schedule = "* */2 * * *",
 *         },
 *     });
 * });
 * ```
 * ```go
 * package main
 * import (
 * 	"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/dataflow"
 * 	"github.com/pulumi/pulumi-gcp/sdk/v7/go/gcp/serviceaccount"
 * 	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
 * )
 * func main() {
 * 	pulumi.Run(func(ctx *pulumi.Context) error {
 * 		serviceAccount, err := serviceaccount.NewAccount(ctx, "service_account", &serviceaccount.AccountArgs{
 * 			AccountId:   pulumi.String("my-account"),
 * 			DisplayName: pulumi.String("Service Account"),
 * 		})
 * 		if err != nil {
 * 			return err
 * 		}
 * 		_, err = dataflow.NewPipeline(ctx, "primary", &dataflow.PipelineArgs{
 * 			Name:        pulumi.String("my-pipeline"),
 * 			DisplayName: pulumi.String("my-pipeline"),
 * 			Type:        pulumi.String("PIPELINE_TYPE_BATCH"),
 * 			State:       pulumi.String("STATE_ACTIVE"),
 * 			Region:      pulumi.String("us-central1"),
 * 			Workload: &dataflow.PipelineWorkloadArgs{
 * 				DataflowLaunchTemplateRequest: &dataflow.PipelineWorkloadDataflowLaunchTemplateRequestArgs{
 * 					ProjectId: pulumi.String("my-project"),
 * 					GcsPath:   pulumi.String("gs://my-bucket/path"),
 * 					LaunchParameters: &dataflow.PipelineWorkloadDataflowLaunchTemplateRequestLaunchParametersArgs{
 * 						JobName: pulumi.String("my-job"),
 * 						Parameters: pulumi.StringMap{
 * 							"name": pulumi.String("wrench"),
 * 						},
 * 						Environment: &dataflow.PipelineWorkloadDataflowLaunchTemplateRequestLaunchParametersEnvironmentArgs{
 * 							NumWorkers:              pulumi.Int(5),
 * 							MaxWorkers:              pulumi.Int(5),
 * 							Zone:                    pulumi.String("us-central1-a"),
 * 							ServiceAccountEmail:     serviceAccount.Email,
 * 							Network:                 pulumi.String("default"),
 * 							TempLocation:            pulumi.String("gs://my-bucket/tmp_dir"),
 * 							BypassTempDirValidation: pulumi.Bool(false),
 * 							MachineType:             pulumi.String("E2"),
 * 							AdditionalUserLabels: pulumi.StringMap{
 * 								"context": pulumi.String("test"),
 * 							},
 * 							WorkerRegion:          pulumi.String("us-central1"),
 * 							WorkerZone:            pulumi.String("us-central1-a"),
 * 							EnableStreamingEngine: pulumi.Bool(false),
 * 						},
 * 						Update: pulumi.Bool(false),
 * 						TransformNameMapping: pulumi.StringMap{
 * 							"name": pulumi.String("wrench"),
 * 						},
 * 					},
 * 					Location: pulumi.String("us-central1"),
 * 				},
 * 			},
 * 			ScheduleInfo: &dataflow.PipelineScheduleInfoArgs{
 * 				Schedule: pulumi.String("* */2 * * *"),
 * 			},
 * 		})
 * 		if err != nil {
 * 			return err
 * 		}
 * 		return nil
 * 	})
 * }
 * ```
 * ```java
 * package generated_program;
 * import com.pulumi.Context;
 * import com.pulumi.Pulumi;
 * import com.pulumi.core.Output;
 * import com.pulumi.gcp.serviceaccount.Account;
 * import com.pulumi.gcp.serviceaccount.AccountArgs;
 * import com.pulumi.gcp.dataflow.Pipeline;
 * import com.pulumi.gcp.dataflow.PipelineArgs;
 * import com.pulumi.gcp.dataflow.inputs.PipelineWorkloadArgs;
 * import com.pulumi.gcp.dataflow.inputs.PipelineWorkloadDataflowLaunchTemplateRequestArgs;
 * import com.pulumi.gcp.dataflow.inputs.PipelineWorkloadDataflowLaunchTemplateRequestLaunchParametersArgs;
 * import com.pulumi.gcp.dataflow.inputs.PipelineWorkloadDataflowLaunchTemplateRequestLaunchParametersEnvironmentArgs;
 * import com.pulumi.gcp.dataflow.inputs.PipelineScheduleInfoArgs;
 * import java.util.List;
 * import java.util.ArrayList;
 * import java.util.Map;
 * import java.io.File;
 * import java.nio.file.Files;
 * import java.nio.file.Paths;
 * public class App {
 *     public static void main(String[] args) {
 *         Pulumi.run(App::stack);
 *     }
 *     public static void stack(Context ctx) {
 *         var serviceAccount = new Account("serviceAccount", AccountArgs.builder()
 *             .accountId("my-account")
 *             .displayName("Service Account")
 *             .build());
 *         var primary = new Pipeline("primary", PipelineArgs.builder()
 *             .name("my-pipeline")
 *             .displayName("my-pipeline")
 *             .type("PIPELINE_TYPE_BATCH")
 *             .state("STATE_ACTIVE")
 *             .region("us-central1")
 *             .workload(PipelineWorkloadArgs.builder()
 *                 .dataflowLaunchTemplateRequest(PipelineWorkloadDataflowLaunchTemplateRequestArgs.builder()
 *                     .projectId("my-project")
 *                     .gcsPath("gs://my-bucket/path")
 *                     .launchParameters(PipelineWorkloadDataflowLaunchTemplateRequestLaunchParametersArgs.builder()
 *                         .jobName("my-job")
 *                         .parameters(Map.of("name", "wrench"))
 *                         .environment(PipelineWorkloadDataflowLaunchTemplateRequestLaunchParametersEnvironmentArgs.builder()
 *                             .numWorkers(5)
 *                             .maxWorkers(5)
 *                             .zone("us-central1-a")
 *                             .serviceAccountEmail(serviceAccount.email())
 *                             .network("default")
 *                             .tempLocation("gs://my-bucket/tmp_dir")
 *                             .bypassTempDirValidation(false)
 *                             .machineType("E2")
 *                             .additionalUserLabels(Map.of("context", "test"))
 *                             .workerRegion("us-central1")
 *                             .workerZone("us-central1-a")
 *                             .enableStreamingEngine(false)
 *                             .build())
 *                         .update(false)
 *                         .transformNameMapping(Map.of("name", "wrench"))
 *                         .build())
 *                     .location("us-central1")
 *                     .build())
 *                 .build())
 *             .scheduleInfo(PipelineScheduleInfoArgs.builder()
 *                 .schedule("* */2 * * *")
 *                 .build())
 *             .build());
 *     }
 * }
 * ```
 * ```yaml
 * resources:
 *   serviceAccount:
 *     type: gcp:serviceaccount:Account
 *     name: service_account
 *     properties:
 *       accountId: my-account
 *       displayName: Service Account
 *   primary:
 *     type: gcp:dataflow:Pipeline
 *     properties:
 *       name: my-pipeline
 *       displayName: my-pipeline
 *       type: PIPELINE_TYPE_BATCH
 *       state: STATE_ACTIVE
 *       region: us-central1
 *       workload:
 *         dataflowLaunchTemplateRequest:
 *           projectId: my-project
 *           gcsPath: gs://my-bucket/path
 *           launchParameters:
 *             jobName: my-job
 *             parameters:
 *               name: wrench
 *             environment:
 *               numWorkers: 5
 *               maxWorkers: 5
 *               zone: us-central1-a
 *               serviceAccountEmail: ${serviceAccount.email}
 *               network: default
 *               tempLocation: gs://my-bucket/tmp_dir
 *               bypassTempDirValidation: false
 *               machineType: E2
 *               additionalUserLabels:
 *                 context: test
 *               workerRegion: us-central1
 *               workerZone: us-central1-a
 *               enableStreamingEngine: false
 *             update: false
 *             transformNameMapping:
 *               name: wrench
 *           location: us-central1
 *       scheduleInfo:
 *         schedule: '* */2 * * *'
 * ```
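 * A Kotlin sketch of the same resource is shown below. The `Pulumi.run` entry point and the generated `pipeline` resource function are assumed from this Kotlin SDK's conventions rather than verified here, and the workload and schedule blocks are omitted for brevity; the `args` setters correspond to the builder declared in this file:
 * ```kotlin
 * import com.pulumi.gcp.dataflow.kotlin.pipeline
 * import com.pulumi.kotlin.Pulumi
 *
 * fun main() {
 *     Pulumi.run {
 *         // Minimal batch pipeline; see the builders below for the full set of arguments.
 *         val primary = pipeline("primary") {
 *             args {
 *                 name("my-pipeline")
 *                 displayName("my-pipeline")
 *                 type("PIPELINE_TYPE_BATCH")
 *                 state("STATE_ACTIVE")
 *                 region("us-central1")
 *             }
 *         }
 *     }
 * }
 * ```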
 * 
 * ## Import
 * Pipeline can be imported using any of these accepted formats:
 * * `projects/{{project}}/locations/{{region}}/pipelines/{{name}}`
 * * `{{project}}/{{region}}/{{name}}`
 * * `{{region}}/{{name}}`
 * * `{{name}}`
 * When using the `pulumi import` command, Pipeline can be imported using one of the formats above. For example:
 * ```sh
 * $ pulumi import gcp:dataflow/pipeline:Pipeline default projects/{{project}}/locations/{{region}}/pipelines/{{name}}
 * ```
 * ```sh
 * $ pulumi import gcp:dataflow/pipeline:Pipeline default {{project}}/{{region}}/{{name}}
 * ```
 * ```sh
 * $ pulumi import gcp:dataflow/pipeline:Pipeline default {{region}}/{{name}}
 * ```
 * ```sh
 * $ pulumi import gcp:dataflow/pipeline:Pipeline default {{name}}
 * ```
 * @property displayName The display name of the pipeline. It can contain only letters ([A-Za-z]), numbers ([0-9]), hyphens (-), and underscores (_).
 * @property name The pipeline name. For example: `projects/PROJECT_ID/locations/LOCATION_ID/pipelines/PIPELINE_ID`.
 * - `PROJECT_ID` can contain letters ([A-Za-z]), numbers ([0-9]), hyphens (-), colons (:), and periods (.). For more information, see Identifying projects.
 * - `LOCATION_ID` is the canonical ID for the pipeline's location. The list of available locations can be obtained by calling google.cloud.location.Locations.ListLocations. Note that the Data Pipelines service is not available in all regions. It depends on Cloud Scheduler, an App Engine application, so it's only available in App Engine regions.
 * - `PIPELINE_ID` is the ID of the pipeline. Must be unique for the selected project and location.
 * @property pipelineSources The sources of the pipeline (for example, Dataplex). The keys and values are set by the corresponding sources during pipeline creation.
 * An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
 * @property project The ID of the project in which the resource belongs.
 * If it is not provided, the provider project is used.
 * @property region A reference to the region
 * @property scheduleInfo Internal scheduling information for a pipeline. If this information is provided, periodic jobs will be created per the schedule. If not, users are responsible for creating jobs externally.
 * https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#schedulespec
 * Structure is documented below.
 * @property schedulerServiceAccountEmail Optional. A service account email to be used with the Cloud Scheduler job. If not specified, the default compute engine service account will be used.
 * @property state The state of the pipeline. When the pipeline is created, the state is set to 'PIPELINE_STATE_ACTIVE' by default. State changes can be requested by setting the state to stopping, paused, or resuming. State cannot be changed through pipelines.patch requests.
 * https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#state
 * Possible values are: `STATE_UNSPECIFIED`, `STATE_RESUMING`, `STATE_ACTIVE`, `STATE_STOPPING`, `STATE_ARCHIVED`, `STATE_PAUSED`.
 * - - -
 * @property type The type of the pipeline. This field affects the scheduling of the pipeline and the type of metrics to show for the pipeline.
 * https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#pipelinetype
 * Possible values are: `PIPELINE_TYPE_UNSPECIFIED`, `PIPELINE_TYPE_BATCH`, `PIPELINE_TYPE_STREAMING`.
 * @property workload Workload information for creating new jobs.
 * https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#workload
 * Structure is documented below.
 */
public data class PipelineArgs(
    public val displayName: Output<String>? = null,
    public val name: Output<String>? = null,
    public val pipelineSources: Output<Map<String, String>>? = null,
    public val project: Output<String>? = null,
    public val region: Output<String>? = null,
    public val scheduleInfo: Output<PipelineScheduleInfoArgs>? = null,
    public val schedulerServiceAccountEmail: Output<String>? = null,
    public val state: Output<String>? = null,
    public val type: Output<String>? = null,
    public val workload: Output<PipelineWorkloadArgs>? = null,
) : ConvertibleToJava<com.pulumi.gcp.dataflow.PipelineArgs> {
    override fun toJava(): com.pulumi.gcp.dataflow.PipelineArgs =
        com.pulumi.gcp.dataflow.PipelineArgs.builder()
            .displayName(displayName?.applyValue({ args0 -> args0 }))
            .name(name?.applyValue({ args0 -> args0 }))
            .pipelineSources(
                pipelineSources?.applyValue({ args0 ->
                    args0.map({ args0 ->
                        args0.key.to(args0.value)
                    }).toMap()
                }),
            )
            .project(project?.applyValue({ args0 -> args0 }))
            .region(region?.applyValue({ args0 -> args0 }))
            .scheduleInfo(scheduleInfo?.applyValue({ args0 -> args0.let({ args0 -> args0.toJava() }) }))
            .schedulerServiceAccountEmail(schedulerServiceAccountEmail?.applyValue({ args0 -> args0 }))
            .state(state?.applyValue({ args0 -> args0 }))
            .type(type?.applyValue({ args0 -> args0 }))
            .workload(workload?.applyValue({ args0 -> args0.let({ args0 -> args0.toJava() }) })).build()
}

/**
 * Builder for [PipelineArgs].
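 *
 * Each argument has paired setters: one accepting a plain value and one accepting an [Output];
 * `scheduleInfo` and `workload` additionally accept nested builder lambdas. The builder's
 * constructor and `build()` are internal, so it is normally reached through the SDK's resource
 * DSL rather than instantiated directly. A minimal sketch of the call shape, assuming the
 * receiver is supplied by an `args { ... }` block (values are illustrative):
 * ```kotlin
 * // Inside a PipelineArgsBuilder receiver, e.g. an args { ... } block:
 * displayName("my-pipeline")                // plain-value overload
 * region(Output.of("us-central1"))          // Output overload
 * pipelineSources("source" to "dataplex")   // vararg Pair overload
 * scheduleInfo {
 *     // configure PipelineScheduleInfoArgsBuilder properties here
 * }
 * ```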
 */
@PulumiTagMarker
public class PipelineArgsBuilder internal constructor() {
    private var displayName: Output<String>? = null

    private var name: Output<String>? = null

    private var pipelineSources: Output<Map<String, String>>? = null

    private var project: Output<String>? = null

    private var region: Output<String>? = null

    private var scheduleInfo: Output<PipelineScheduleInfoArgs>? = null

    private var schedulerServiceAccountEmail: Output<String>? = null

    private var state: Output<String>? = null

    private var type: Output<String>? = null

    private var workload: Output<PipelineWorkloadArgs>? = null

    /**
     * @param value The display name of the pipeline. It can contain only letters ([A-Za-z]), numbers ([0-9]), hyphens (-), and underscores (_).
     */
    @JvmName("owhbdujolveetogk")
    public suspend fun displayName(`value`: Output<String>) {
        this.displayName = value
    }

    /**
     * @param value "The pipeline name. For example': 'projects/PROJECT_ID/locations/LOCATION_ID/pipelines/PIPELINE_ID."
     * "- PROJECT_ID can contain letters ([A-Za-z]), numbers ([0-9]), hyphens (-), colons (:), and periods (.). For more information, see Identifying projects."
     * "LOCATION_ID is the canonical ID for the pipeline's location. The list of available locations can be obtained by calling google.cloud.location.Locations.ListLocations. Note that the Data Pipelines service is not available in all regions. It depends on Cloud Scheduler, an App Engine application, so it's only available in App Engine regions."
     * "PIPELINE_ID is the ID of the pipeline. Must be unique for the selected project and location."
     */
    @JvmName("btldmlebipvudifi")
    public suspend fun name(`value`: Output<String>) {
        this.name = value
    }

    /**
     * @param value The sources of the pipeline (for example, Dataplex). The keys and values are set by the corresponding sources during pipeline creation.
     * An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
     */
    @JvmName("fvnbdrayukroirau")
    public suspend fun pipelineSources(`value`: Output<Map<String, String>>) {
        this.pipelineSources = value
    }

    /**
     * @param value The ID of the project in which the resource belongs.
     * If it is not provided, the provider project is used.
     */
    @JvmName("pwwvwldrtucakxiy")
    public suspend fun project(`value`: Output<String>) {
        this.project = value
    }

    /**
     * @param value A reference to the region
     */
    @JvmName("yndrskdhlauiyadt")
    public suspend fun region(`value`: Output<String>) {
        this.region = value
    }

    /**
     * @param value Internal scheduling information for a pipeline. If this information is provided, periodic jobs will be created per the schedule. If not, users are responsible for creating jobs externally.
     * https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#schedulespec
     * Structure is documented below.
     */
    @JvmName("dlifmrjeghjfhsny")
    public suspend fun scheduleInfo(`value`: Output<PipelineScheduleInfoArgs>) {
        this.scheduleInfo = value
    }

    /**
     * @param value Optional. A service account email to be used with the Cloud Scheduler job. If not specified, the default compute engine service account will be used.
     */
    @JvmName("xccvbxkjnckcbeyo")
    public suspend fun schedulerServiceAccountEmail(`value`: Output<String>) {
        this.schedulerServiceAccountEmail = value
    }

    /**
     * @param value The state of the pipeline. When the pipeline is created, the state is set to 'PIPELINE_STATE_ACTIVE' by default. State changes can be requested by setting the state to stopping, paused, or resuming. State cannot be changed through pipelines.patch requests.
     * https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#state
     * Possible values are: `STATE_UNSPECIFIED`, `STATE_RESUMING`, `STATE_ACTIVE`, `STATE_STOPPING`, `STATE_ARCHIVED`, `STATE_PAUSED`.
     * - - -
     */
    @JvmName("vjrxbybqnxxujpnp")
    public suspend fun state(`value`: Output<String>) {
        this.state = value
    }

    /**
     * @param value The type of the pipeline. This field affects the scheduling of the pipeline and the type of metrics to show for the pipeline.
     * https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#pipelinetype
     * Possible values are: `PIPELINE_TYPE_UNSPECIFIED`, `PIPELINE_TYPE_BATCH`, `PIPELINE_TYPE_STREAMING`.
     */
    @JvmName("fdyndlgbnrqqjcgn")
    public suspend fun type(`value`: Output<String>) {
        this.type = value
    }

    /**
     * @param value Workload information for creating new jobs.
     * https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#workload
     * Structure is documented below.
     */
    @JvmName("smleqjidpuqjnsqo")
    public suspend fun workload(`value`: Output<PipelineWorkloadArgs>) {
        this.workload = value
    }

    /**
     * @param value The display name of the pipeline. It can contain only letters ([A-Za-z]), numbers ([0-9]), hyphens (-), and underscores (_).
     */
    @JvmName("lbkqruguymhblxem")
    public suspend fun displayName(`value`: String?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.displayName = mapped
    }

    /**
     * @param value "The pipeline name. For example': 'projects/PROJECT_ID/locations/LOCATION_ID/pipelines/PIPELINE_ID."
     * "- PROJECT_ID can contain letters ([A-Za-z]), numbers ([0-9]), hyphens (-), colons (:), and periods (.). For more information, see Identifying projects."
     * "LOCATION_ID is the canonical ID for the pipeline's location. The list of available locations can be obtained by calling google.cloud.location.Locations.ListLocations. Note that the Data Pipelines service is not available in all regions. It depends on Cloud Scheduler, an App Engine application, so it's only available in App Engine regions."
     * "PIPELINE_ID is the ID of the pipeline. Must be unique for the selected project and location."
     */
    @JvmName("cviiihdthugorbov")
    public suspend fun name(`value`: String?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.name = mapped
    }

    /**
     * @param value The sources of the pipeline (for example, Dataplex). The keys and values are set by the corresponding sources during pipeline creation.
     * An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
     */
    @JvmName("atlwqdvcdkmojvgb")
    public suspend fun pipelineSources(`value`: Map<String, String>?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.pipelineSources = mapped
    }

    /**
     * @param values The sources of the pipeline (for example, Dataplex). The keys and values are set by the corresponding sources during pipeline creation.
     * An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
     */
    @JvmName("vilpvkwcluiuhudx")
    public fun pipelineSources(vararg values: Pair<String, String>) {
        val toBeMapped = values.toMap()
        val mapped = toBeMapped.let({ args0 -> of(args0) })
        this.pipelineSources = mapped
    }

    /**
     * @param value The ID of the project in which the resource belongs.
     * If it is not provided, the provider project is used.
     */
    @JvmName("iclolidewidkpmln")
    public suspend fun project(`value`: String?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.project = mapped
    }

    /**
     * @param value A reference to the region
     */
    @JvmName("jejrnjhhuveuyiei")
    public suspend fun region(`value`: String?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.region = mapped
    }

    /**
     * @param value Internal scheduling information for a pipeline. If this information is provided, periodic jobs will be created per the schedule. If not, users are responsible for creating jobs externally.
     * https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#schedulespec
     * Structure is documented below.
     */
    @JvmName("dllqghxcittcfdvm")
    public suspend fun scheduleInfo(`value`: PipelineScheduleInfoArgs?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.scheduleInfo = mapped
    }

    /**
     * @param argument Internal scheduling information for a pipeline. If this information is provided, periodic jobs will be created per the schedule. If not, users are responsible for creating jobs externally.
     * https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#schedulespec
     * Structure is documented below.
     */
    @JvmName("cvntqgqdpdthqfml")
    public suspend fun scheduleInfo(argument: suspend PipelineScheduleInfoArgsBuilder.() -> Unit) {
        val toBeMapped = PipelineScheduleInfoArgsBuilder().applySuspend { argument() }.build()
        val mapped = of(toBeMapped)
        this.scheduleInfo = mapped
    }

    /**
     * @param value Optional. A service account email to be used with the Cloud Scheduler job. If not specified, the default compute engine service account will be used.
     */
    @JvmName("xlpgdqacdluqiamn")
    public suspend fun schedulerServiceAccountEmail(`value`: String?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.schedulerServiceAccountEmail = mapped
    }

    /**
     * @param value The state of the pipeline. When the pipeline is created, the state is set to 'PIPELINE_STATE_ACTIVE' by default. State changes can be requested by setting the state to stopping, paused, or resuming. State cannot be changed through pipelines.patch requests.
     * https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#state
     * Possible values are: `STATE_UNSPECIFIED`, `STATE_RESUMING`, `STATE_ACTIVE`, `STATE_STOPPING`, `STATE_ARCHIVED`, `STATE_PAUSED`.
     * - - -
     */
    @JvmName("aftdwribacvtmlgc")
    public suspend fun state(`value`: String?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.state = mapped
    }

    /**
     * @param value The type of the pipeline. This field affects the scheduling of the pipeline and the type of metrics to show for the pipeline.
     * https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#pipelinetype
     * Possible values are: `PIPELINE_TYPE_UNSPECIFIED`, `PIPELINE_TYPE_BATCH`, `PIPELINE_TYPE_STREAMING`.
     */
    @JvmName("pusojelschduebyx")
    public suspend fun type(`value`: String?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.type = mapped
    }

    /**
     * @param value Workload information for creating new jobs.
     * https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#workload
     * Structure is documented below.
     */
    @JvmName("qhftxkkvytlodklf")
    public suspend fun workload(`value`: PipelineWorkloadArgs?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.workload = mapped
    }

    /**
     * @param argument Workload information for creating new jobs.
     * https://cloud.google.com/dataflow/docs/reference/data-pipelines/rest/v1/projects.locations.pipelines#workload
     * Structure is documented below.
     */
    @JvmName("phwpxsqumcabihoe")
    public suspend fun workload(argument: suspend PipelineWorkloadArgsBuilder.() -> Unit) {
        val toBeMapped = PipelineWorkloadArgsBuilder().applySuspend { argument() }.build()
        val mapped = of(toBeMapped)
        this.workload = mapped
    }

    internal fun build(): PipelineArgs = PipelineArgs(
        displayName = displayName,
        name = name,
        pipelineSources = pipelineSources,
        project = project,
        region = region,
        scheduleInfo = scheduleInfo,
        schedulerServiceAccountEmail = schedulerServiceAccountEmail,
        state = state,
        type = type,
        workload = workload,
    )
}



