
software.amazon.awssdk.services.glue.model.JobUpdate Maven / Gradle / Ivy


The AWS Java SDK for AWS Glue module holds the client classes that are used for communicating with AWS Glue Service

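The listing below is the generated model class for the Glue UpdateJob operation's JobUpdate structure. As a quick orientation, here is a minimal, illustrative sketch of how the class is typically used: build a JobUpdate with its fluent builder and pass it to the Glue client's updateJob call. The client configuration, role ARN, script location, worker settings, and job name shown here are placeholder assumptions, not values taken from this page.

import software.amazon.awssdk.services.glue.GlueClient;
import software.amazon.awssdk.services.glue.model.JobCommand;
import software.amazon.awssdk.services.glue.model.JobUpdate;
import software.amazon.awssdk.services.glue.model.UpdateJobRequest;

public class UpdateGlueJobExample {
    public static void main(String[] args) {
        // Credentials and region are resolved from the default provider chains.
        try (GlueClient glue = GlueClient.create()) {
            // UpdateJob completely overwrites the previous job definition with this one.
            JobUpdate update = JobUpdate.builder()
                    .role("arn:aws:iam::123456789012:role/MyGlueJobRole")       // placeholder role
                    .command(JobCommand.builder()
                            .name("glueetl")
                            .scriptLocation("s3://my-bucket/scripts/my-job.py") // placeholder script
                            .build())
                    .glueVersion("4.0")
                    .workerType("G.1X")
                    .numberOfWorkers(10)
                    .timeout(2880)
                    .build();

            glue.updateJob(UpdateJobRequest.builder()
                    .jobName("my-existing-job")                                 // placeholder job name
                    .jobUpdate(update)
                    .build());
        }
    }
}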
/*
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * 
 * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
 * the License. A copy of the License is located at
 * 
 * http://aws.amazon.com/apache2.0
 * 
 * or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
 * and limitations under the License.
 */

package software.amazon.awssdk.services.glue.model;

import java.io.Serializable;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
import java.util.function.Function;
import software.amazon.awssdk.annotations.Generated;
import software.amazon.awssdk.core.SdkField;
import software.amazon.awssdk.core.SdkPojo;
import software.amazon.awssdk.core.protocol.MarshallLocation;
import software.amazon.awssdk.core.protocol.MarshallingType;
import software.amazon.awssdk.core.traits.LocationTrait;
import software.amazon.awssdk.core.traits.MapTrait;
import software.amazon.awssdk.core.util.DefaultSdkAutoConstructMap;
import software.amazon.awssdk.core.util.SdkAutoConstructMap;
import software.amazon.awssdk.utils.ToString;
import software.amazon.awssdk.utils.builder.CopyableBuilder;
import software.amazon.awssdk.utils.builder.ToCopyableBuilder;

/**
 * <p>
 * Specifies information used to update an existing job definition. The previous job definition is completely
 * overwritten by this information.
 * </p>
*/ @Generated("software.amazon.awssdk:codegen") public final class JobUpdate implements SdkPojo, Serializable, ToCopyableBuilder { private static final SdkField JOB_MODE_FIELD = SdkField. builder(MarshallingType.STRING) .memberName("JobMode").getter(getter(JobUpdate::jobModeAsString)).setter(setter(Builder::jobMode)) .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("JobMode").build()).build(); private static final SdkField JOB_RUN_QUEUING_ENABLED_FIELD = SdkField. builder(MarshallingType.BOOLEAN) .memberName("JobRunQueuingEnabled").getter(getter(JobUpdate::jobRunQueuingEnabled)) .setter(setter(Builder::jobRunQueuingEnabled)) .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("JobRunQueuingEnabled").build()) .build(); private static final SdkField DESCRIPTION_FIELD = SdkField. builder(MarshallingType.STRING) .memberName("Description").getter(getter(JobUpdate::description)).setter(setter(Builder::description)) .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("Description").build()).build(); private static final SdkField LOG_URI_FIELD = SdkField. builder(MarshallingType.STRING).memberName("LogUri") .getter(getter(JobUpdate::logUri)).setter(setter(Builder::logUri)) .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("LogUri").build()).build(); private static final SdkField ROLE_FIELD = SdkField. builder(MarshallingType.STRING).memberName("Role") .getter(getter(JobUpdate::role)).setter(setter(Builder::role)) .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("Role").build()).build(); private static final SdkField EXECUTION_PROPERTY_FIELD = SdkField . builder(MarshallingType.SDK_POJO).memberName("ExecutionProperty") .getter(getter(JobUpdate::executionProperty)).setter(setter(Builder::executionProperty)) .constructor(ExecutionProperty::builder) .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("ExecutionProperty").build()).build(); private static final SdkField COMMAND_FIELD = SdkField. builder(MarshallingType.SDK_POJO) .memberName("Command").getter(getter(JobUpdate::command)).setter(setter(Builder::command)) .constructor(JobCommand::builder) .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("Command").build()).build(); private static final SdkField> DEFAULT_ARGUMENTS_FIELD = SdkField .> builder(MarshallingType.MAP) .memberName("DefaultArguments") .getter(getter(JobUpdate::defaultArguments)) .setter(setter(Builder::defaultArguments)) .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("DefaultArguments").build(), MapTrait.builder() .keyLocationName("key") .valueLocationName("value") .valueFieldInfo( SdkField. builder(MarshallingType.STRING) .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD) .locationName("value").build()).build()).build()).build(); private static final SdkField> NON_OVERRIDABLE_ARGUMENTS_FIELD = SdkField .> builder(MarshallingType.MAP) .memberName("NonOverridableArguments") .getter(getter(JobUpdate::nonOverridableArguments)) .setter(setter(Builder::nonOverridableArguments)) .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("NonOverridableArguments").build(), MapTrait.builder() .keyLocationName("key") .valueLocationName("value") .valueFieldInfo( SdkField. 
builder(MarshallingType.STRING) .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD) .locationName("value").build()).build()).build()).build(); private static final SdkField CONNECTIONS_FIELD = SdkField . builder(MarshallingType.SDK_POJO).memberName("Connections").getter(getter(JobUpdate::connections)) .setter(setter(Builder::connections)).constructor(ConnectionsList::builder) .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("Connections").build()).build(); private static final SdkField MAX_RETRIES_FIELD = SdkField. builder(MarshallingType.INTEGER) .memberName("MaxRetries").getter(getter(JobUpdate::maxRetries)).setter(setter(Builder::maxRetries)) .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("MaxRetries").build()).build(); private static final SdkField ALLOCATED_CAPACITY_FIELD = SdkField. builder(MarshallingType.INTEGER) .memberName("AllocatedCapacity").getter(getter(JobUpdate::allocatedCapacity)) .setter(setter(Builder::allocatedCapacity)) .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("AllocatedCapacity").build()).build(); private static final SdkField TIMEOUT_FIELD = SdkField. builder(MarshallingType.INTEGER) .memberName("Timeout").getter(getter(JobUpdate::timeout)).setter(setter(Builder::timeout)) .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("Timeout").build()).build(); private static final SdkField MAX_CAPACITY_FIELD = SdkField. builder(MarshallingType.DOUBLE) .memberName("MaxCapacity").getter(getter(JobUpdate::maxCapacity)).setter(setter(Builder::maxCapacity)) .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("MaxCapacity").build()).build(); private static final SdkField WORKER_TYPE_FIELD = SdkField. builder(MarshallingType.STRING) .memberName("WorkerType").getter(getter(JobUpdate::workerTypeAsString)).setter(setter(Builder::workerType)) .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("WorkerType").build()).build(); private static final SdkField NUMBER_OF_WORKERS_FIELD = SdkField. builder(MarshallingType.INTEGER) .memberName("NumberOfWorkers").getter(getter(JobUpdate::numberOfWorkers)).setter(setter(Builder::numberOfWorkers)) .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("NumberOfWorkers").build()).build(); private static final SdkField SECURITY_CONFIGURATION_FIELD = SdkField. builder(MarshallingType.STRING) .memberName("SecurityConfiguration").getter(getter(JobUpdate::securityConfiguration)) .setter(setter(Builder::securityConfiguration)) .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("SecurityConfiguration").build()) .build(); private static final SdkField NOTIFICATION_PROPERTY_FIELD = SdkField . builder(MarshallingType.SDK_POJO).memberName("NotificationProperty") .getter(getter(JobUpdate::notificationProperty)).setter(setter(Builder::notificationProperty)) .constructor(NotificationProperty::builder) .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("NotificationProperty").build()) .build(); private static final SdkField GLUE_VERSION_FIELD = SdkField. 
builder(MarshallingType.STRING) .memberName("GlueVersion").getter(getter(JobUpdate::glueVersion)).setter(setter(Builder::glueVersion)) .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("GlueVersion").build()).build(); private static final SdkField> CODE_GEN_CONFIGURATION_NODES_FIELD = SdkField .> builder(MarshallingType.MAP) .memberName("CodeGenConfigurationNodes") .getter(getter(JobUpdate::codeGenConfigurationNodes)) .setter(setter(Builder::codeGenConfigurationNodes)) .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("CodeGenConfigurationNodes").build(), MapTrait.builder() .keyLocationName("key") .valueLocationName("value") .valueFieldInfo( SdkField. builder(MarshallingType.SDK_POJO) .constructor(CodeGenConfigurationNode::builder) .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD) .locationName("value").build()).build()).build()).build(); private static final SdkField EXECUTION_CLASS_FIELD = SdkField. builder(MarshallingType.STRING) .memberName("ExecutionClass").getter(getter(JobUpdate::executionClassAsString)) .setter(setter(Builder::executionClass)) .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("ExecutionClass").build()).build(); private static final SdkField SOURCE_CONTROL_DETAILS_FIELD = SdkField . builder(MarshallingType.SDK_POJO).memberName("SourceControlDetails") .getter(getter(JobUpdate::sourceControlDetails)).setter(setter(Builder::sourceControlDetails)) .constructor(SourceControlDetails::builder) .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("SourceControlDetails").build()) .build(); private static final SdkField MAINTENANCE_WINDOW_FIELD = SdkField. builder(MarshallingType.STRING) .memberName("MaintenanceWindow").getter(getter(JobUpdate::maintenanceWindow)) .setter(setter(Builder::maintenanceWindow)) .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("MaintenanceWindow").build()).build(); private static final List> SDK_FIELDS = Collections.unmodifiableList(Arrays.asList(JOB_MODE_FIELD, JOB_RUN_QUEUING_ENABLED_FIELD, DESCRIPTION_FIELD, LOG_URI_FIELD, ROLE_FIELD, EXECUTION_PROPERTY_FIELD, COMMAND_FIELD, DEFAULT_ARGUMENTS_FIELD, NON_OVERRIDABLE_ARGUMENTS_FIELD, CONNECTIONS_FIELD, MAX_RETRIES_FIELD, ALLOCATED_CAPACITY_FIELD, TIMEOUT_FIELD, MAX_CAPACITY_FIELD, WORKER_TYPE_FIELD, NUMBER_OF_WORKERS_FIELD, SECURITY_CONFIGURATION_FIELD, NOTIFICATION_PROPERTY_FIELD, GLUE_VERSION_FIELD, CODE_GEN_CONFIGURATION_NODES_FIELD, EXECUTION_CLASS_FIELD, SOURCE_CONTROL_DETAILS_FIELD, MAINTENANCE_WINDOW_FIELD)); private static final Map> SDK_NAME_TO_FIELD = memberNameToFieldInitializer(); private static final long serialVersionUID = 1L; private final String jobMode; private final Boolean jobRunQueuingEnabled; private final String description; private final String logUri; private final String role; private final ExecutionProperty executionProperty; private final JobCommand command; private final Map defaultArguments; private final Map nonOverridableArguments; private final ConnectionsList connections; private final Integer maxRetries; private final Integer allocatedCapacity; private final Integer timeout; private final Double maxCapacity; private final String workerType; private final Integer numberOfWorkers; private final String securityConfiguration; private final NotificationProperty notificationProperty; private final String glueVersion; private final Map codeGenConfigurationNodes; private final String executionClass; private 
final SourceControlDetails sourceControlDetails; private final String maintenanceWindow; private JobUpdate(BuilderImpl builder) { this.jobMode = builder.jobMode; this.jobRunQueuingEnabled = builder.jobRunQueuingEnabled; this.description = builder.description; this.logUri = builder.logUri; this.role = builder.role; this.executionProperty = builder.executionProperty; this.command = builder.command; this.defaultArguments = builder.defaultArguments; this.nonOverridableArguments = builder.nonOverridableArguments; this.connections = builder.connections; this.maxRetries = builder.maxRetries; this.allocatedCapacity = builder.allocatedCapacity; this.timeout = builder.timeout; this.maxCapacity = builder.maxCapacity; this.workerType = builder.workerType; this.numberOfWorkers = builder.numberOfWorkers; this.securityConfiguration = builder.securityConfiguration; this.notificationProperty = builder.notificationProperty; this.glueVersion = builder.glueVersion; this.codeGenConfigurationNodes = builder.codeGenConfigurationNodes; this.executionClass = builder.executionClass; this.sourceControlDetails = builder.sourceControlDetails; this.maintenanceWindow = builder.maintenanceWindow; } /** *

* A mode that describes how a job was created. Valid values are: *

* <ul>
* <li>SCRIPT - The job was created using the Glue Studio script editor.</li>
* <li>VISUAL - The job was created using the Glue Studio visual editor.</li>
* <li>NOTEBOOK - The job was created using an interactive sessions notebook.</li>
* </ul>

* When the JobMode field is missing or null, SCRIPT is assigned as the default value. *

*

* If the service returns an enum value that is not available in the current SDK version, {@link #jobMode} will * return {@link JobMode#UNKNOWN_TO_SDK_VERSION}. The raw value returned by the service is available from * {@link #jobModeAsString}. *
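* <p>
* As an illustrative sketch (assuming a JobUpdate instance named update), the enum accessor can be combined with the
* raw accessor when the service returns a mode this SDK version does not know:
* </p>
* <pre>{@code
* JobMode mode = update.jobMode();
* if (mode == JobMode.UNKNOWN_TO_SDK_VERSION) {
*     String rawMode = update.jobModeAsString(); // fall back to the raw value from the service
* }
* }</pre>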

* * @return A mode that describes how a job was created. Valid values are:

*         <ul>
*         <li>SCRIPT - The job was created using the Glue Studio script editor.</li>
*         <li>VISUAL - The job was created using the Glue Studio visual editor.</li>
*         <li>NOTEBOOK - The job was created using an interactive sessions notebook.</li>
*         </ul>

* When the JobMode field is missing or null, SCRIPT is assigned as the default * value. * @see JobMode */ public final JobMode jobMode() { return JobMode.fromValue(jobMode); } /** *

* A mode that describes how a job was created. Valid values are: *

* <ul>
* <li>SCRIPT - The job was created using the Glue Studio script editor.</li>
* <li>VISUAL - The job was created using the Glue Studio visual editor.</li>
* <li>NOTEBOOK - The job was created using an interactive sessions notebook.</li>
* </ul>

* When the JobMode field is missing or null, SCRIPT is assigned as the default value. *

*

* If the service returns an enum value that is not available in the current SDK version, {@link #jobMode} will * return {@link JobMode#UNKNOWN_TO_SDK_VERSION}. The raw value returned by the service is available from * {@link #jobModeAsString}. *

* * @return A mode that describes how a job was created. Valid values are:

*         <ul>
*         <li>SCRIPT - The job was created using the Glue Studio script editor.</li>
*         <li>VISUAL - The job was created using the Glue Studio visual editor.</li>
*         <li>NOTEBOOK - The job was created using an interactive sessions notebook.</li>
*         </ul>

* When the JobMode field is missing or null, SCRIPT is assigned as the default * value. * @see JobMode */ public final String jobModeAsString() { return jobMode; } /** *

* Specifies whether job run queuing is enabled for the job runs for this job. *

*

* A value of true means job run queuing is enabled for the job runs. If false or not populated, the job runs will * not be considered for queueing. *

*

* If this field does not match the value set in the job run, then the value from the job run field will be used. *
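* <p>
* Illustrative sketch only (the rest of the builder chain is omitted): enabling queuing when building the update.
* </p>
* <pre>{@code
* JobUpdate update = JobUpdate.builder()
*         .jobRunQueuingEnabled(Boolean.TRUE)
*         .build();
* }</pre>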

* * @return Specifies whether job run queuing is enabled for the job runs for this job.

*

* A value of true means job run queuing is enabled for the job runs. If false or not populated, the job * runs will not be considered for queueing. *

*

* If this field does not match the value set in the job run, then the value from the job run field will be * used. */ public final Boolean jobRunQueuingEnabled() { return jobRunQueuingEnabled; } /** *

* Description of the job being defined. *

* * @return Description of the job being defined. */ public final String description() { return description; } /** *

* This field is reserved for future use. *

* * @return This field is reserved for future use. */ public final String logUri() { return logUri; } /** *

* The name or Amazon Resource Name (ARN) of the IAM role associated with this job (required). *

* * @return The name or Amazon Resource Name (ARN) of the IAM role associated with this job (required). */ public final String role() { return role; } /** *

* An ExecutionProperty specifying the maximum number of concurrent runs allowed for this job. *

* * @return An ExecutionProperty specifying the maximum number of concurrent runs allowed for this job. */ public final ExecutionProperty executionProperty() { return executionProperty; } /** *

* The JobCommand that runs this job (required). *

* * @return The JobCommand that runs this job (required). */ public final JobCommand command() { return command; } /** * For responses, this returns true if the service returned a value for the DefaultArguments property. This DOES NOT * check that the value is non-empty (for which, you should check the {@code isEmpty()} method on the property). * This is useful because the SDK will never return a null collection or map, but you may need to differentiate * between the service returning nothing (or null) and the service returning an empty collection or map. For * requests, this returns true if a value for the property was specified in the request builder, and false if a * value was not specified. */ public final boolean hasDefaultArguments() { return defaultArguments != null && !(defaultArguments instanceof SdkAutoConstructMap); } /** *

* The default arguments for every run of this job, specified as name-value pairs. *

*

* You can specify arguments here that your own job-execution script consumes, as well as arguments that Glue itself * consumes. *

*

* Job arguments may be logged. Do not pass plaintext secrets as arguments. Retrieve secrets from a Glue Connection, * Secrets Manager or other secret management mechanism if you intend to keep them within the Job. *

*

* For information about how to specify and consume your own Job arguments, see the Calling Glue APIs in * Python topic in the developer guide. *

*

* For information about the arguments you can provide to this field when configuring Spark jobs, see the Special Parameters * Used by Glue topic in the developer guide. *

*

* For information about the arguments you can provide to this field when configuring Ray jobs, see Using job parameters in Ray * jobs in the developer guide. *

*

* Attempts to modify the collection returned by this method will result in an UnsupportedOperationException. *

*

* This method will never return null. If you would like to know whether the service returned this field (so that * you can differentiate between null and empty), you can use the {@link #hasDefaultArguments} method. *
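* <p>
* A small illustrative sketch of the null-versus-empty distinction described above, assuming a JobUpdate instance
* named update (the argument key is a placeholder):
* </p>
* <pre>{@code
* if (update.hasDefaultArguments() && !update.defaultArguments().isEmpty()) {
*     String language = update.defaultArguments().get("--job-language");
* }
* }</pre>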

* * @return The default arguments for every run of this job, specified as name-value pairs.

*

* You can specify arguments here that your own job-execution script consumes, as well as arguments that * Glue itself consumes. *

*

* Job arguments may be logged. Do not pass plaintext secrets as arguments. Retrieve secrets from a Glue * Connection, Secrets Manager or other secret management mechanism if you intend to keep them within the * Job. *

*

* For information about how to specify and consume your own Job arguments, see the Calling Glue * APIs in Python topic in the developer guide. *

*

* For information about the arguments you can provide to this field when configuring Spark jobs, see the Special * Parameters Used by Glue topic in the developer guide. *

*

* For information about the arguments you can provide to this field when configuring Ray jobs, see Using job parameters * in Ray jobs in the developer guide. */ public final Map defaultArguments() { return defaultArguments; } /** * For responses, this returns true if the service returned a value for the NonOverridableArguments property. This * DOES NOT check that the value is non-empty (for which, you should check the {@code isEmpty()} method on the * property). This is useful because the SDK will never return a null collection or map, but you may need to * differentiate between the service returning nothing (or null) and the service returning an empty collection or * map. For requests, this returns true if a value for the property was specified in the request builder, and false * if a value was not specified. */ public final boolean hasNonOverridableArguments() { return nonOverridableArguments != null && !(nonOverridableArguments instanceof SdkAutoConstructMap); } /** *

* Arguments for this job that are not overridden when providing job arguments in a job run, specified as name-value * pairs. *

*

* Attempts to modify the collection returned by this method will result in an UnsupportedOperationException. *

*

* This method will never return null. If you would like to know whether the service returned this field (so that * you can differentiate between null and empty), you can use the {@link #hasNonOverridableArguments} method. *

* * @return Arguments for this job that are not overridden when providing job arguments in a job run, specified as * name-value pairs. */ public final Map nonOverridableArguments() { return nonOverridableArguments; } /** *

* The connections used for this job. *

* * @return The connections used for this job. */ public final ConnectionsList connections() { return connections; } /** *

* The maximum number of times to retry this job if it fails. *

* * @return The maximum number of times to retry this job if it fails. */ public final Integer maxRetries() { return maxRetries; } /** *

* This field is deprecated. Use MaxCapacity instead. *

*

* The number of Glue data processing units (DPUs) to allocate to this job. You can allocate a minimum of 2 DPUs; * the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity * and 16 GB of memory. For more information, see the Glue pricing * page. *

* * @return This field is deprecated. Use MaxCapacity instead.

*

* The number of Glue data processing units (DPUs) to allocate to this job. You can allocate a minimum of 2 * DPUs; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of * compute capacity and 16 GB of memory. For more information, see the Glue pricing page. * @deprecated This property is deprecated, use MaxCapacity instead. */ @Deprecated public final Integer allocatedCapacity() { return allocatedCapacity; } /** *

* The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated * and enters TIMEOUT status. The default is 2,880 minutes (48 hours) for batch jobs. *

*

* Streaming jobs must have timeout values less than 7 days or 10080 minutes. When the value is left blank, the job
* will be restarted after 7 days if you have not set up a maintenance window. If you have set up a maintenance
* window, it will be restarted during the maintenance window after 7 days.
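* <p>
* For illustration, the default batch timeout could be set explicitly on the builder with {@code .timeout(2880)}.
* </p>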

* * @return The job timeout in minutes. This is the maximum time that a job run can consume resources before it is * terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours) for batch * jobs.

*

* Streaming jobs must have timeout values less than 7 days or 10080 minutes. When the value is left blank,
* the job will be restarted after 7 days if you have not set up a maintenance window. If you have
* set up a maintenance window, it will be restarted during the maintenance window after 7 days.
*/
public final Integer timeout() {
    return timeout;
}

/**

* For Glue version 1.0 or earlier jobs, using the standard worker type, the number of Glue data processing units * (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of * 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the Glue pricing page. *

*

* For Glue version 2.0+ jobs, you cannot specify a Maximum capacity. Instead, you should specify a * Worker type and the Number of workers. *

*

* Do not set MaxCapacity if using WorkerType and NumberOfWorkers. *
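* <p>
* Illustrative sketch (the values are assumptions): sizing through the worker settings instead of MaxCapacity.
* </p>
* <pre>{@code
* JobUpdate update = JobUpdate.builder()
*         .workerType("G.1X")
*         .numberOfWorkers(10)
*         .build();
* }</pre>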

*

* The value that can be allocated for MaxCapacity depends on whether you are running a Python shell * job, an Apache Spark ETL job, or an Apache Spark streaming ETL job: *

* <ul>
* <li>When you specify a Python shell job (JobCommand.Name="pythonshell"), you can allocate either 0.0625
* or 1 DPU. The default is 0.0625 DPU.</li>
* <li>When you specify an Apache Spark ETL job (JobCommand.Name="glueetl") or Apache Spark streaming ETL
* job (JobCommand.Name="gluestreaming"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs.
* This job type cannot have a fractional DPU allocation.</li>
* </ul>
* * @return For Glue version 1.0 or earlier jobs, using the standard worker type, the number of Glue data processing * units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power * that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the Glue pricing page.

*

* For Glue version 2.0+ jobs, you cannot specify a Maximum capacity. Instead, you should * specify a Worker type and the Number of workers. *

*

* Do not set MaxCapacity if using WorkerType and NumberOfWorkers. *

*

* The value that can be allocated for MaxCapacity depends on whether you are running a Python * shell job, an Apache Spark ETL job, or an Apache Spark streaming ETL job: *

*         <ul>
*         <li>When you specify a Python shell job (JobCommand.Name="pythonshell"), you can allocate either
*         0.0625 or 1 DPU. The default is 0.0625 DPU.</li>
*         <li>When you specify an Apache Spark ETL job (JobCommand.Name="glueetl") or Apache Spark
*         streaming ETL job (JobCommand.Name="gluestreaming"), you can allocate from 2 to 100 DPUs.
*         The default is 10 DPUs. This job type cannot have a fractional DPU allocation.</li>
*         </ul>
*/
public final Double maxCapacity() {
    return maxCapacity;
}

/**

    * The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or * G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. *

* <ul>
* <li>For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 94GB disk, and
* provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and
* queries, offering a scalable and cost-effective way to run most jobs.</li>
* <li>For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 138GB disk, and
* provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and
* queries, offering a scalable and cost-effective way to run most jobs.</li>
* <li>For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk, and
* provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding
* transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later
* Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West
* (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe
* (Frankfurt), Europe (Ireland), and Europe (Stockholm).</li>
* <li>For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk, and
* provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding
* transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later
* Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type.</li>
* <li>For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk, and
* provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is
* only available for Glue version 3.0 or later streaming jobs.</li>
* <li>For the Z.2X worker type, each worker maps to 2 M-DPU (8 vCPUs, 64 GB of memory) with 128 GB disk, and
* provides up to 8 Ray workers based on the autoscaler.</li>
* </ul>

    * If the service returns an enum value that is not available in the current SDK version, {@link #workerType} will * return {@link WorkerType#UNKNOWN_TO_SDK_VERSION}. The raw value returned by the service is available from * {@link #workerTypeAsString}. *

    * * @return The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, * G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.

*         <ul>
*         <li>For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 94GB disk,
*         and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms,
*         joins, and queries, offering a scalable and cost-effective way to run most jobs.</li>
*         <li>For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 138GB disk,
*         and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms,
*         joins, and queries, offering a scalable and cost-effective way to run most jobs.</li>
*         <li>For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk,
*         and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your
*         most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue
*         version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East
*         (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo),
*         Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).</li>
*         <li>For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk,
*         and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your
*         most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue
*         version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X
*         worker type.</li>
*         <li>For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk,
*         and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This
*         worker type is only available for Glue version 3.0 or later streaming jobs.</li>
*         <li>For the Z.2X worker type, each worker maps to 2 M-DPU (8 vCPUs, 64 GB of memory) with 128 GB disk,
*         and provides up to 8 Ray workers based on the autoscaler.</li>
*         </ul>
* @see WorkerType
*/
public final WorkerType workerType() {
    return WorkerType.fromValue(workerType);
}

/**

      * The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or * G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. *

* <ul>
* <li>For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 94GB disk, and
* provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and
* queries, offering a scalable and cost-effective way to run most jobs.</li>
* <li>For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 138GB disk, and
* provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and
* queries, offering a scalable and cost-effective way to run most jobs.</li>
* <li>For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk, and
* provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding
* transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later
* Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West
* (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe
* (Frankfurt), Europe (Ireland), and Europe (Stockholm).</li>
* <li>For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk, and
* provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding
* transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later
* Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type.</li>
* <li>For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk, and
* provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is
* only available for Glue version 3.0 or later streaming jobs.</li>
* <li>For the Z.2X worker type, each worker maps to 2 M-DPU (8 vCPUs, 64 GB of memory) with 128 GB disk, and
* provides up to 8 Ray workers based on the autoscaler.</li>
* </ul>

      * If the service returns an enum value that is not available in the current SDK version, {@link #workerType} will * return {@link WorkerType#UNKNOWN_TO_SDK_VERSION}. The raw value returned by the service is available from * {@link #workerTypeAsString}. *

      * * @return The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, * G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.

*         <ul>
*         <li>For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 94GB disk,
*         and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms,
*         joins, and queries, offering a scalable and cost-effective way to run most jobs.</li>
*         <li>For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 138GB disk,
*         and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms,
*         joins, and queries, offering a scalable and cost-effective way to run most jobs.</li>
*         <li>For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk,
*         and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your
*         most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue
*         version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East
*         (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo),
*         Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).</li>
*         <li>For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk,
*         and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your
*         most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue
*         version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X
*         worker type.</li>
*         <li>For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk,
*         and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This
*         worker type is only available for Glue version 3.0 or later streaming jobs.</li>
*         <li>For the Z.2X worker type, each worker maps to 2 M-DPU (8 vCPUs, 64 GB of memory) with 128 GB disk,
*         and provides up to 8 Ray workers based on the autoscaler.</li>
*         </ul>
* @see WorkerType
*/
public final String workerTypeAsString() {
    return workerType;
}

/**

        * The number of workers of a defined workerType that are allocated when a job runs. *

        * * @return The number of workers of a defined workerType that are allocated when a job runs. */ public final Integer numberOfWorkers() { return numberOfWorkers; } /** *

        * The name of the SecurityConfiguration structure to be used with this job. *

        * * @return The name of the SecurityConfiguration structure to be used with this job. */ public final String securityConfiguration() { return securityConfiguration; } /** *

        * Specifies the configuration properties of a job notification. *

        * * @return Specifies the configuration properties of a job notification. */ public final NotificationProperty notificationProperty() { return notificationProperty; } /** *

* In Spark jobs, GlueVersion determines the versions of Apache Spark and Python that Glue makes available to
* a job. The Python version indicates the version supported for jobs of type Spark.

        *

        * Ray jobs should set GlueVersion to 4.0 or greater. However, the versions of Ray, Python * and additional libraries available in your Ray job are determined by the Runtime parameter of the * Job command. *

        *

        * For more information about the available Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide. *

        *

        * Jobs that are created without specifying a Glue version default to Glue 0.9. *
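* <p>
* For illustration, a Spark or Ray job could pin its runtime on the builder with {@code .glueVersion("4.0")}; the
* version string here is an assumption for the example.
* </p>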

*
* @return In Spark jobs, GlueVersion determines the versions of Apache Spark and Python that Glue makes
*         available to a job. The Python version indicates the version supported for jobs of type Spark.

        *

        * Ray jobs should set GlueVersion to 4.0 or greater. However, the versions of * Ray, Python and additional libraries available in your Ray job are determined by the Runtime * parameter of the Job command. *

        *

        * For more information about the available Glue versions and corresponding Spark and Python versions, see * Glue version in the developer * guide. *

        *

        * Jobs that are created without specifying a Glue version default to Glue 0.9. */ public final String glueVersion() { return glueVersion; } /** * For responses, this returns true if the service returned a value for the CodeGenConfigurationNodes property. This * DOES NOT check that the value is non-empty (for which, you should check the {@code isEmpty()} method on the * property). This is useful because the SDK will never return a null collection or map, but you may need to * differentiate between the service returning nothing (or null) and the service returning an empty collection or * map. For requests, this returns true if a value for the property was specified in the request builder, and false * if a value was not specified. */ public final boolean hasCodeGenConfigurationNodes() { return codeGenConfigurationNodes != null && !(codeGenConfigurationNodes instanceof SdkAutoConstructMap); } /** *

        * The representation of a directed acyclic graph on which both the Glue Studio visual component and Glue Studio * code generation is based. *

        *

        * Attempts to modify the collection returned by this method will result in an UnsupportedOperationException. *

        *

        * This method will never return null. If you would like to know whether the service returned this field (so that * you can differentiate between null and empty), you can use the {@link #hasCodeGenConfigurationNodes} method. *

        * * @return The representation of a directed acyclic graph on which both the Glue Studio visual component and Glue * Studio code generation is based. */ public final Map codeGenConfigurationNodes() { return codeGenConfigurationNodes; } /** *

        * Indicates whether the job is run with a standard or flexible execution class. The standard execution-class is * ideal for time-sensitive workloads that require fast job startup and dedicated resources. *

        *

        * The flexible execution class is appropriate for time-insensitive jobs whose start and completion times may vary. *

        *

        * Only jobs with Glue version 3.0 and above and command type glueetl will be allowed to set * ExecutionClass to FLEX. The flexible execution class is available for Spark jobs. *

        *

        * If the service returns an enum value that is not available in the current SDK version, {@link #executionClass} * will return {@link ExecutionClass#UNKNOWN_TO_SDK_VERSION}. The raw value returned by the service is available * from {@link #executionClassAsString}. *
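* <p>
* Illustrative sketch, assuming a Glue 3.0+ job with command type glueetl:
* </p>
* <pre>{@code
* JobUpdate update = JobUpdate.builder()
*         .executionClass(ExecutionClass.FLEX)
*         .build();
* }</pre>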

        * * @return Indicates whether the job is run with a standard or flexible execution class. The standard * execution-class is ideal for time-sensitive workloads that require fast job startup and dedicated * resources.

        *

        * The flexible execution class is appropriate for time-insensitive jobs whose start and completion times * may vary. *

        *

        * Only jobs with Glue version 3.0 and above and command type glueetl will be allowed to set * ExecutionClass to FLEX. The flexible execution class is available for Spark * jobs. * @see ExecutionClass */ public final ExecutionClass executionClass() { return ExecutionClass.fromValue(executionClass); } /** *

        * Indicates whether the job is run with a standard or flexible execution class. The standard execution-class is * ideal for time-sensitive workloads that require fast job startup and dedicated resources. *

        *

        * The flexible execution class is appropriate for time-insensitive jobs whose start and completion times may vary. *

        *

        * Only jobs with Glue version 3.0 and above and command type glueetl will be allowed to set * ExecutionClass to FLEX. The flexible execution class is available for Spark jobs. *

        *

        * If the service returns an enum value that is not available in the current SDK version, {@link #executionClass} * will return {@link ExecutionClass#UNKNOWN_TO_SDK_VERSION}. The raw value returned by the service is available * from {@link #executionClassAsString}. *

        * * @return Indicates whether the job is run with a standard or flexible execution class. The standard * execution-class is ideal for time-sensitive workloads that require fast job startup and dedicated * resources.

        *

        * The flexible execution class is appropriate for time-insensitive jobs whose start and completion times * may vary. *

        *

        * Only jobs with Glue version 3.0 and above and command type glueetl will be allowed to set * ExecutionClass to FLEX. The flexible execution class is available for Spark * jobs. * @see ExecutionClass */ public final String executionClassAsString() { return executionClass; } /** *

        * The details for a source control configuration for a job, allowing synchronization of job artifacts to or from a * remote repository. *

        * * @return The details for a source control configuration for a job, allowing synchronization of job artifacts to or * from a remote repository. */ public final SourceControlDetails sourceControlDetails() { return sourceControlDetails; } /** *

        * This field specifies a day of the week and hour for a maintenance window for streaming jobs. Glue periodically * performs maintenance activities. During these maintenance windows, Glue will need to restart your streaming jobs. *

        *

* Glue will restart the job within 3 hours of the specified maintenance window. For instance, if you set up the
* maintenance window for Monday at 10:00AM GMT, your jobs will be restarted between 10:00AM GMT and 1:00PM GMT.

        * * @return This field specifies a day of the week and hour for a maintenance window for streaming jobs. Glue * periodically performs maintenance activities. During these maintenance windows, Glue will need to restart * your streaming jobs.

        *

        * Glue will restart the job within 3 hours of the specified maintenance window. For instance, if you set up * the maintenance window for Monday at 10:00AM GMT, your jobs will be restarted between 10:00AM GMT to * 1:00PM GMT. */ public final String maintenanceWindow() { return maintenanceWindow; } @Override public Builder toBuilder() { return new BuilderImpl(this); } public static Builder builder() { return new BuilderImpl(); } public static Class serializableBuilderClass() { return BuilderImpl.class; } @Override public final int hashCode() { int hashCode = 1; hashCode = 31 * hashCode + Objects.hashCode(jobModeAsString()); hashCode = 31 * hashCode + Objects.hashCode(jobRunQueuingEnabled()); hashCode = 31 * hashCode + Objects.hashCode(description()); hashCode = 31 * hashCode + Objects.hashCode(logUri()); hashCode = 31 * hashCode + Objects.hashCode(role()); hashCode = 31 * hashCode + Objects.hashCode(executionProperty()); hashCode = 31 * hashCode + Objects.hashCode(command()); hashCode = 31 * hashCode + Objects.hashCode(hasDefaultArguments() ? defaultArguments() : null); hashCode = 31 * hashCode + Objects.hashCode(hasNonOverridableArguments() ? nonOverridableArguments() : null); hashCode = 31 * hashCode + Objects.hashCode(connections()); hashCode = 31 * hashCode + Objects.hashCode(maxRetries()); hashCode = 31 * hashCode + Objects.hashCode(allocatedCapacity()); hashCode = 31 * hashCode + Objects.hashCode(timeout()); hashCode = 31 * hashCode + Objects.hashCode(maxCapacity()); hashCode = 31 * hashCode + Objects.hashCode(workerTypeAsString()); hashCode = 31 * hashCode + Objects.hashCode(numberOfWorkers()); hashCode = 31 * hashCode + Objects.hashCode(securityConfiguration()); hashCode = 31 * hashCode + Objects.hashCode(notificationProperty()); hashCode = 31 * hashCode + Objects.hashCode(glueVersion()); hashCode = 31 * hashCode + Objects.hashCode(hasCodeGenConfigurationNodes() ? 
codeGenConfigurationNodes() : null); hashCode = 31 * hashCode + Objects.hashCode(executionClassAsString()); hashCode = 31 * hashCode + Objects.hashCode(sourceControlDetails()); hashCode = 31 * hashCode + Objects.hashCode(maintenanceWindow()); return hashCode; } @Override public final boolean equals(Object obj) { return equalsBySdkFields(obj); } @Override public final boolean equalsBySdkFields(Object obj) { if (this == obj) { return true; } if (obj == null) { return false; } if (!(obj instanceof JobUpdate)) { return false; } JobUpdate other = (JobUpdate) obj; return Objects.equals(jobModeAsString(), other.jobModeAsString()) && Objects.equals(jobRunQueuingEnabled(), other.jobRunQueuingEnabled()) && Objects.equals(description(), other.description()) && Objects.equals(logUri(), other.logUri()) && Objects.equals(role(), other.role()) && Objects.equals(executionProperty(), other.executionProperty()) && Objects.equals(command(), other.command()) && hasDefaultArguments() == other.hasDefaultArguments() && Objects.equals(defaultArguments(), other.defaultArguments()) && hasNonOverridableArguments() == other.hasNonOverridableArguments() && Objects.equals(nonOverridableArguments(), other.nonOverridableArguments()) && Objects.equals(connections(), other.connections()) && Objects.equals(maxRetries(), other.maxRetries()) && Objects.equals(allocatedCapacity(), other.allocatedCapacity()) && Objects.equals(timeout(), other.timeout()) && Objects.equals(maxCapacity(), other.maxCapacity()) && Objects.equals(workerTypeAsString(), other.workerTypeAsString()) && Objects.equals(numberOfWorkers(), other.numberOfWorkers()) && Objects.equals(securityConfiguration(), other.securityConfiguration()) && Objects.equals(notificationProperty(), other.notificationProperty()) && Objects.equals(glueVersion(), other.glueVersion()) && hasCodeGenConfigurationNodes() == other.hasCodeGenConfigurationNodes() && Objects.equals(codeGenConfigurationNodes(), other.codeGenConfigurationNodes()) && Objects.equals(executionClassAsString(), other.executionClassAsString()) && Objects.equals(sourceControlDetails(), other.sourceControlDetails()) && Objects.equals(maintenanceWindow(), other.maintenanceWindow()); } /** * Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be * redacted from this string using a placeholder value. */ @Override public final String toString() { return ToString.builder("JobUpdate").add("JobMode", jobModeAsString()) .add("JobRunQueuingEnabled", jobRunQueuingEnabled()).add("Description", description()).add("LogUri", logUri()) .add("Role", role()).add("ExecutionProperty", executionProperty()).add("Command", command()) .add("DefaultArguments", hasDefaultArguments() ? defaultArguments() : null) .add("NonOverridableArguments", hasNonOverridableArguments() ? nonOverridableArguments() : null) .add("Connections", connections()).add("MaxRetries", maxRetries()).add("AllocatedCapacity", allocatedCapacity()) .add("Timeout", timeout()).add("MaxCapacity", maxCapacity()).add("WorkerType", workerTypeAsString()) .add("NumberOfWorkers", numberOfWorkers()).add("SecurityConfiguration", securityConfiguration()) .add("NotificationProperty", notificationProperty()).add("GlueVersion", glueVersion()) .add("CodeGenConfigurationNodes", codeGenConfigurationNodes() == null ? 
null : "*** Sensitive Data Redacted ***") .add("ExecutionClass", executionClassAsString()).add("SourceControlDetails", sourceControlDetails()) .add("MaintenanceWindow", maintenanceWindow()).build(); } public final Optional getValueForField(String fieldName, Class clazz) { switch (fieldName) { case "JobMode": return Optional.ofNullable(clazz.cast(jobModeAsString())); case "JobRunQueuingEnabled": return Optional.ofNullable(clazz.cast(jobRunQueuingEnabled())); case "Description": return Optional.ofNullable(clazz.cast(description())); case "LogUri": return Optional.ofNullable(clazz.cast(logUri())); case "Role": return Optional.ofNullable(clazz.cast(role())); case "ExecutionProperty": return Optional.ofNullable(clazz.cast(executionProperty())); case "Command": return Optional.ofNullable(clazz.cast(command())); case "DefaultArguments": return Optional.ofNullable(clazz.cast(defaultArguments())); case "NonOverridableArguments": return Optional.ofNullable(clazz.cast(nonOverridableArguments())); case "Connections": return Optional.ofNullable(clazz.cast(connections())); case "MaxRetries": return Optional.ofNullable(clazz.cast(maxRetries())); case "AllocatedCapacity": return Optional.ofNullable(clazz.cast(allocatedCapacity())); case "Timeout": return Optional.ofNullable(clazz.cast(timeout())); case "MaxCapacity": return Optional.ofNullable(clazz.cast(maxCapacity())); case "WorkerType": return Optional.ofNullable(clazz.cast(workerTypeAsString())); case "NumberOfWorkers": return Optional.ofNullable(clazz.cast(numberOfWorkers())); case "SecurityConfiguration": return Optional.ofNullable(clazz.cast(securityConfiguration())); case "NotificationProperty": return Optional.ofNullable(clazz.cast(notificationProperty())); case "GlueVersion": return Optional.ofNullable(clazz.cast(glueVersion())); case "CodeGenConfigurationNodes": return Optional.ofNullable(clazz.cast(codeGenConfigurationNodes())); case "ExecutionClass": return Optional.ofNullable(clazz.cast(executionClassAsString())); case "SourceControlDetails": return Optional.ofNullable(clazz.cast(sourceControlDetails())); case "MaintenanceWindow": return Optional.ofNullable(clazz.cast(maintenanceWindow())); default: return Optional.empty(); } } @Override public final List> sdkFields() { return SDK_FIELDS; } @Override public final Map> sdkFieldNameToField() { return SDK_NAME_TO_FIELD; } private static Map> memberNameToFieldInitializer() { Map> map = new HashMap<>(); map.put("JobMode", JOB_MODE_FIELD); map.put("JobRunQueuingEnabled", JOB_RUN_QUEUING_ENABLED_FIELD); map.put("Description", DESCRIPTION_FIELD); map.put("LogUri", LOG_URI_FIELD); map.put("Role", ROLE_FIELD); map.put("ExecutionProperty", EXECUTION_PROPERTY_FIELD); map.put("Command", COMMAND_FIELD); map.put("DefaultArguments", DEFAULT_ARGUMENTS_FIELD); map.put("NonOverridableArguments", NON_OVERRIDABLE_ARGUMENTS_FIELD); map.put("Connections", CONNECTIONS_FIELD); map.put("MaxRetries", MAX_RETRIES_FIELD); map.put("AllocatedCapacity", ALLOCATED_CAPACITY_FIELD); map.put("Timeout", TIMEOUT_FIELD); map.put("MaxCapacity", MAX_CAPACITY_FIELD); map.put("WorkerType", WORKER_TYPE_FIELD); map.put("NumberOfWorkers", NUMBER_OF_WORKERS_FIELD); map.put("SecurityConfiguration", SECURITY_CONFIGURATION_FIELD); map.put("NotificationProperty", NOTIFICATION_PROPERTY_FIELD); map.put("GlueVersion", GLUE_VERSION_FIELD); map.put("CodeGenConfigurationNodes", CODE_GEN_CONFIGURATION_NODES_FIELD); map.put("ExecutionClass", EXECUTION_CLASS_FIELD); map.put("SourceControlDetails", SOURCE_CONTROL_DETAILS_FIELD); 
map.put("MaintenanceWindow", MAINTENANCE_WINDOW_FIELD); return Collections.unmodifiableMap(map); } private static Function getter(Function g) { return obj -> g.apply((JobUpdate) obj); } private static BiConsumer setter(BiConsumer s) { return (obj, val) -> s.accept((Builder) obj, val); } public interface Builder extends SdkPojo, CopyableBuilder { /** *

        * A mode that describes how a job was created. Valid values are: *

* <ul>
* <li>SCRIPT - The job was created using the Glue Studio script editor.</li>
* <li>VISUAL - The job was created using the Glue Studio visual editor.</li>
* <li>NOTEBOOK - The job was created using an interactive sessions notebook.</li>
* </ul>

        * When the JobMode field is missing or null, SCRIPT is assigned as the default value. *

        * * @param jobMode * A mode that describes how a job was created. Valid values are:

*        <ul>
*        <li>SCRIPT - The job was created using the Glue Studio script editor.</li>
*        <li>VISUAL - The job was created using the Glue Studio visual editor.</li>
*        <li>NOTEBOOK - The job was created using an interactive sessions notebook.</li>
*        </ul>

        * When the JobMode field is missing or null, SCRIPT is assigned as the default * value. * @see JobMode * @return Returns a reference to this object so that method calls can be chained together. * @see JobMode */ Builder jobMode(String jobMode); /** *

        * A mode that describes how a job was created. Valid values are: *

* <ul>
* <li>SCRIPT - The job was created using the Glue Studio script editor.</li>
* <li>VISUAL - The job was created using the Glue Studio visual editor.</li>
* <li>NOTEBOOK - The job was created using an interactive sessions notebook.</li>
* </ul>

        * When the JobMode field is missing or null, SCRIPT is assigned as the default value. *

        * * @param jobMode * A mode that describes how a job was created. Valid values are:

*        <ul>
*        <li>SCRIPT - The job was created using the Glue Studio script editor.</li>
*        <li>VISUAL - The job was created using the Glue Studio visual editor.</li>
*        <li>NOTEBOOK - The job was created using an interactive sessions notebook.</li>
*        </ul>

        * When the JobMode field is missing or null, SCRIPT is assigned as the default * value. * @see JobMode * @return Returns a reference to this object so that method calls can be chained together. * @see JobMode */ Builder jobMode(JobMode jobMode); /** *

        * Specifies whether job run queuing is enabled for the job runs for this job. *

        *

        * A value of true means job run queuing is enabled for the job runs. If false or not populated, the job runs * will not be considered for queueing. *

        *

        * If this field does not match the value set in the job run, then the value from the job run field will be * used. *

        * * @param jobRunQueuingEnabled * Specifies whether job run queuing is enabled for the job runs for this job.

        *

        * A value of true means job run queuing is enabled for the job runs. If false or not populated, the job * runs will not be considered for queueing. *

        *

        * If this field does not match the value set in the job run, then the value from the job run field will * be used. * @return Returns a reference to this object so that method calls can be chained together. */ Builder jobRunQueuingEnabled(Boolean jobRunQueuingEnabled); /** *

        * Description of the job being defined. *

        * * @param description * Description of the job being defined. * @return Returns a reference to this object so that method calls can be chained together. */ Builder description(String description); /** *

        * This field is reserved for future use. *

        * * @param logUri * This field is reserved for future use. * @return Returns a reference to this object so that method calls can be chained together. */ Builder logUri(String logUri); /** *

        * The name or Amazon Resource Name (ARN) of the IAM role associated with this job (required). *

        * * @param role * The name or Amazon Resource Name (ARN) of the IAM role associated with this job (required). * @return Returns a reference to this object so that method calls can be chained together. */ Builder role(String role); /** *

        * An ExecutionProperty specifying the maximum number of concurrent runs allowed for this job. *

        * * @param executionProperty * An ExecutionProperty specifying the maximum number of concurrent runs allowed for this * job. * @return Returns a reference to this object so that method calls can be chained together. */ Builder executionProperty(ExecutionProperty executionProperty); /** *

        * An ExecutionProperty specifying the maximum number of concurrent runs allowed for this job. *

        * This is a convenience method that creates an instance of the {@link ExecutionProperty.Builder} avoiding the * need to create one manually via {@link ExecutionProperty#builder()}. * *

        * When the {@link Consumer} completes, {@link ExecutionProperty.Builder#build()} is called immediately and its * result is passed to {@link #executionProperty(ExecutionProperty)}. * * @param executionProperty * a consumer that will call methods on {@link ExecutionProperty.Builder} * @return Returns a reference to this object so that method calls can be chained together. * @see #executionProperty(ExecutionProperty) */ default Builder executionProperty(Consumer executionProperty) { return executionProperty(ExecutionProperty.builder().applyMutation(executionProperty).build()); } /** *

        * The JobCommand that runs this job (required). *

        * * @param command * The JobCommand that runs this job (required). * @return Returns a reference to this object so that method calls can be chained together. */ Builder command(JobCommand command); /** *

        * The JobCommand that runs this job (required). *

        * This is a convenience method that creates an instance of the {@link JobCommand.Builder} avoiding the need to * create one manually via {@link JobCommand#builder()}. * *

        * When the {@link Consumer} completes, {@link JobCommand.Builder#build()} is called immediately and its result * is passed to {@link #command(JobCommand)}. * * @param command * a consumer that will call methods on {@link JobCommand.Builder} * @return Returns a reference to this object so that method calls can be chained together. * @see #command(JobCommand) */ default Builder command(Consumer command) { return command(JobCommand.builder().applyMutation(command).build()); } /** *

         * The default arguments for every run of this job, specified as name-value pairs.
         *
         * You can specify arguments here that your own job-execution script consumes, as well as arguments that Glue
         * itself consumes.
         *
         * Job arguments may be logged. Do not pass plaintext secrets as arguments. Retrieve secrets from a Glue
         * Connection, Secrets Manager or other secret management mechanism if you intend to keep them within the Job.
         *
         * For information about how to specify and consume your own Job arguments, see the Calling Glue APIs in
         * Python topic in the developer guide.
         *
         * For information about the arguments you can provide to this field when configuring Spark jobs, see the
         * Special Parameters Used by Glue topic in the developer guide.
         *
         * For information about the arguments you can provide to this field when configuring Ray jobs, see Using job
         * parameters in Ray jobs in the developer guide.
         *
         * @param defaultArguments
         *        The default arguments for every run of this job, specified as name-value pairs (see the argument
         *        conventions and developer guide topics above).
         * @return Returns a reference to this object so that method calls can be chained together.
         */
        Builder defaultArguments(Map<String, String> defaultArguments);
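        // Sketch of a default-arguments map. --TempDir and --job-language are standard Glue
        // special parameters; the bucket name and the --source_table key are hypothetical, and
        // plaintext secrets must never be placed in this map.
        //
        //   JobUpdate.Builder builder = JobUpdate.builder();   // hypothetical partially-built update
        //   Map<String, String> args = new HashMap<>();
        //   args.put("--TempDir", "s3://my-bucket/temp/");
        //   args.put("--job-language", "python");
        //   args.put("--source_table", "sales_raw");   // consumed by your own script
        //   builder.defaultArguments(args);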

        /**
         * Arguments for this job that are not overridden when providing job arguments in a job run, specified as
         * name-value pairs.
         *
         * @param nonOverridableArguments
         *        Arguments for this job that are not overridden when providing job arguments in a job run, specified
         *        as name-value pairs.
         * @return Returns a reference to this object so that method calls can be chained together.
         */
        Builder nonOverridableArguments(Map<String, String> nonOverridableArguments);

        /**

        * The connections used for this job. *

        * * @param connections * The connections used for this job. * @return Returns a reference to this object so that method calls can be chained together. */ Builder connections(ConnectionsList connections); /** *

        * The connections used for this job. *

        * This is a convenience method that creates an instance of the {@link ConnectionsList.Builder} avoiding the * need to create one manually via {@link ConnectionsList#builder()}. * *

        * When the {@link Consumer} completes, {@link ConnectionsList.Builder#build()} is called immediately and its * result is passed to {@link #connections(ConnectionsList)}. * * @param connections * a consumer that will call methods on {@link ConnectionsList.Builder} * @return Returns a reference to this object so that method calls can be chained together. * @see #connections(ConnectionsList) */ default Builder connections(Consumer connections) { return connections(ConnectionsList.builder().applyMutation(connections).build()); } /** *

        * The maximum number of times to retry this job if it fails. *

        * * @param maxRetries * The maximum number of times to retry this job if it fails. * @return Returns a reference to this object so that method calls can be chained together. */ Builder maxRetries(Integer maxRetries); /** *

        * This field is deprecated. Use MaxCapacity instead. *

        *

        * The number of Glue data processing units (DPUs) to allocate to this job. You can allocate a minimum of 2 * DPUs; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute * capacity and 16 GB of memory. For more information, see the Glue pricing page. *

        * * @param allocatedCapacity * This field is deprecated. Use MaxCapacity instead.

        *

        * The number of Glue data processing units (DPUs) to allocate to this job. You can allocate a minimum of * 2 DPUs; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of * compute capacity and 16 GB of memory. For more information, see the Glue pricing page. * @return Returns a reference to this object so that method calls can be chained together. * @deprecated This property is deprecated, use MaxCapacity instead. */ @Deprecated Builder allocatedCapacity(Integer allocatedCapacity); /** *

         * The job timeout in minutes. This is the maximum time that a job run can consume resources before it is
         * terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours) for batch jobs.
         *
         * Streaming jobs must have timeout values less than 7 days or 10,080 minutes. When the value is left blank,
         * the job will be restarted after 7 days if you have not set up a maintenance window. If you have set up a
         * maintenance window, it will be restarted during the maintenance window after 7 days.
         *
         * @param timeout
         *        The job timeout in minutes. This is the maximum time that a job run can consume resources before it
         *        is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours) for batch jobs;
         *        streaming jobs follow the 7-day rule described above.
         * @return Returns a reference to this object so that method calls can be chained together.
         */
        Builder timeout(Integer timeout);
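        // Sketch of the timeout rules above (values only, on a hypothetical builder variable):
        //
        //   builder.timeout(2880);    // batch job: explicit 48-hour timeout (the documented default)
        //   builder.timeout(10079);   // streaming job: must stay below 10,080 minutes (7 days)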

        /**
         * For Glue version 1.0 or earlier jobs, using the standard worker type, the number of Glue data processing
         * units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that
         * consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the Glue pricing
         * page.
         *
         * For Glue version 2.0+ jobs, you cannot specify a Maximum capacity. Instead, you should specify a Worker
         * type and the Number of workers.
         *
         * Do not set MaxCapacity if using WorkerType and NumberOfWorkers.
         *
         * The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job, an
         * Apache Spark ETL job, or an Apache Spark streaming ETL job:
         *
         *   - For a Python shell job (JobCommand.Name="pythonshell"), you can allocate either 0.0625 or 1 DPU. The
         *     default is 0.0625 DPU.
         *   - For an Apache Spark ETL job (JobCommand.Name="glueetl") or an Apache Spark streaming ETL job
         *     (JobCommand.Name="gluestreaming"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This
         *     job type cannot have a fractional DPU allocation.
         *
         * @param maxCapacity
         *        For Glue version 1.0 or earlier jobs, using the standard worker type, the number of Glue data
         *        processing units (DPUs) that can be allocated when this job runs, subject to the per-job-type rules
         *        described above. Do not set MaxCapacity if using WorkerType and NumberOfWorkers.
         * @return Returns a reference to this object so that method calls can be chained together.
         */
        Builder maxCapacity(Double maxCapacity);
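        // Sketch of the MaxCapacity rules above, using hypothetical builder variables for a
        // Python shell job and a Spark ETL job:
        //
        //   pythonShellUpdate.maxCapacity(0.0625);   // "pythonshell": 0.0625 or 1 DPU
        //   sparkEtlUpdate.maxCapacity(10.0);        // "glueetl"/"gluestreaming": 2-100 whole DPUs
        //   // For Glue 2.0+ jobs, leave MaxCapacity unset and use workerType/numberOfWorkers instead.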

        /**
         * The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X
         * or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.
         *
         *   - G.1X: each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 94GB disk, and provides 1 executor per
         *     worker. We recommend this worker type for workloads such as data transforms, joins, and queries, as it
         *     offers a scalable and cost-effective way to run most jobs.
         *   - G.2X: each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 138GB disk, and provides 1 executor per
         *     worker. We recommend this worker type for workloads such as data transforms, joins, and queries, as it
         *     offers a scalable and cost-effective way to run most jobs.
         *   - G.4X: each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk, and provides 1 executor
         *     per worker. We recommend this worker type for jobs whose workloads contain your most demanding
         *     transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0
         *     or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N.
         *     Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo),
         *     Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).
         *   - G.8X: each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk, and provides 1 executor
         *     per worker. We recommend this worker type for jobs whose workloads contain your most demanding
         *     transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0
         *     or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type.
         *   - G.025X: each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk, and provides 1 executor
         *     per worker. We recommend this worker type for low volume streaming jobs. This worker type is only
         *     available for Glue version 3.0 or later streaming jobs.
         *   - Z.2X: each worker maps to 2 M-DPU (8 vCPUs, 64 GB of memory) with 128 GB disk, and provides up to 8 Ray
         *     workers based on the autoscaler.
         *
         * @param workerType
         *        The type of predefined worker that is allocated when a job runs: G.1X, G.2X, G.4X, G.8X or G.025X
         *        for Spark jobs, or Z.2X for Ray jobs (see the sizing details above).
         * @return Returns a reference to this object so that method calls can be chained together.
         * @see WorkerType
         */
        Builder workerType(String workerType);
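        // Sketch pairing a worker type with a worker count (sizes listed above); the builder
        // variable is hypothetical. Do not combine these settings with maxCapacity.
        //
        //   builder.workerType("G.1X")     // 1 DPU (4 vCPUs, 16 GB) per worker
        //          .numberOfWorkers(10);   // roughly 10 DPUs of total capacity
        //   // Ray jobs would use "Z.2X" instead of the G.* Spark worker types.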

        /**
         * The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X
         * or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.
         *
         *   - G.1X: each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 94GB disk, and provides 1 executor per
         *     worker. We recommend this worker type for workloads such as data transforms, joins, and queries, as it
         *     offers a scalable and cost-effective way to run most jobs.
         *   - G.2X: each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 138GB disk, and provides 1 executor per
         *     worker. We recommend this worker type for workloads such as data transforms, joins, and queries, as it
         *     offers a scalable and cost-effective way to run most jobs.
         *   - G.4X: each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk, and provides 1 executor
         *     per worker. We recommend this worker type for jobs whose workloads contain your most demanding
         *     transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0
         *     or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N.
         *     Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo),
         *     Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).
         *   - G.8X: each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk, and provides 1 executor
         *     per worker. We recommend this worker type for jobs whose workloads contain your most demanding
         *     transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0
         *     or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type.
         *   - G.025X: each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk, and provides 1 executor
         *     per worker. We recommend this worker type for low volume streaming jobs. This worker type is only
         *     available for Glue version 3.0 or later streaming jobs.
         *   - Z.2X: each worker maps to 2 M-DPU (8 vCPUs, 64 GB of memory) with 128 GB disk, and provides up to 8 Ray
         *     workers based on the autoscaler.
         *
         * @param workerType
         *        The type of predefined worker that is allocated when a job runs: G.1X, G.2X, G.4X, G.8X or G.025X
         *        for Spark jobs, or Z.2X for Ray jobs (see the sizing details above).
         * @return Returns a reference to this object so that method calls can be chained together.
         * @see WorkerType
         */
        Builder workerType(WorkerType workerType);

        /**

              * The number of workers of a defined workerType that are allocated when a job runs. *

              * * @param numberOfWorkers * The number of workers of a defined workerType that are allocated when a job runs. * @return Returns a reference to this object so that method calls can be chained together. */ Builder numberOfWorkers(Integer numberOfWorkers); /** *

              * The name of the SecurityConfiguration structure to be used with this job. *

              * * @param securityConfiguration * The name of the SecurityConfiguration structure to be used with this job. * @return Returns a reference to this object so that method calls can be chained together. */ Builder securityConfiguration(String securityConfiguration); /** *

              * Specifies the configuration properties of a job notification. *

              * * @param notificationProperty * Specifies the configuration properties of a job notification. * @return Returns a reference to this object so that method calls can be chained together. */ Builder notificationProperty(NotificationProperty notificationProperty); /** *

              * Specifies the configuration properties of a job notification. *

              * This is a convenience method that creates an instance of the {@link NotificationProperty.Builder} avoiding * the need to create one manually via {@link NotificationProperty#builder()}. * *

              * When the {@link Consumer} completes, {@link NotificationProperty.Builder#build()} is called immediately and * its result is passed to {@link #notificationProperty(NotificationProperty)}. * * @param notificationProperty * a consumer that will call methods on {@link NotificationProperty.Builder} * @return Returns a reference to this object so that method calls can be chained together. * @see #notificationProperty(NotificationProperty) */ default Builder notificationProperty(Consumer notificationProperty) { return notificationProperty(NotificationProperty.builder().applyMutation(notificationProperty).build()); } /** *

         * In Spark jobs, GlueVersion determines the versions of Apache Spark and Python that Glue makes available in
         * a job. The Python version indicates the version supported for jobs of type Spark.
         *
         * Ray jobs should set GlueVersion to 4.0 or greater. However, the versions of Ray, Python and additional
         * libraries available in your Ray job are determined by the Runtime parameter of the Job command.
         *
         * For more information about the available Glue versions and corresponding Spark and Python versions, see
         * Glue version in the developer guide.
         *
         * Jobs that are created without specifying a Glue version default to Glue 0.9.
         *
         * @param glueVersion
         *        In Spark jobs, GlueVersion determines the versions of Apache Spark and Python that Glue makes
         *        available in a job; Ray jobs should set it to 4.0 or greater. Jobs that are created without
         *        specifying a Glue version default to Glue 0.9.
         * @return Returns a reference to this object so that method calls can be chained together.
         */
        Builder glueVersion(String glueVersion);
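        // Sketch: pinning a Glue version and, for a Glue 3.0+ "glueetl" job, opting into the
        // FLEX execution class (builder variable is hypothetical):
        //
        //   builder.glueVersion("4.0")        // Spark and Python versions follow the Glue version
        //          .executionClass("FLEX");   // flexible execution; STANDARD is the alternative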

        /**
         * The representation of a directed acyclic graph on which both the Glue Studio visual component and Glue
         * Studio code generation are based.
         *
         * @param codeGenConfigurationNodes
         *        The representation of a directed acyclic graph on which both the Glue Studio visual component and
         *        Glue Studio code generation are based.
         * @return Returns a reference to this object so that method calls can be chained together.
         */
        Builder codeGenConfigurationNodes(Map<String, CodeGenConfigurationNode> codeGenConfigurationNodes);

        /**

              * Indicates whether the job is run with a standard or flexible execution class. The standard execution-class is * ideal for time-sensitive workloads that require fast job startup and dedicated resources. *

              *

              * The flexible execution class is appropriate for time-insensitive jobs whose start and completion times may * vary. *

              *

              * Only jobs with Glue version 3.0 and above and command type glueetl will be allowed to set * ExecutionClass to FLEX. The flexible execution class is available for Spark jobs. *

              * * @param executionClass * Indicates whether the job is run with a standard or flexible execution class. The standard * execution-class is ideal for time-sensitive workloads that require fast job startup and dedicated * resources.

              *

              * The flexible execution class is appropriate for time-insensitive jobs whose start and completion times * may vary. *

              *

              * Only jobs with Glue version 3.0 and above and command type glueetl will be allowed to set * ExecutionClass to FLEX. The flexible execution class is available for Spark * jobs. * @see ExecutionClass * @return Returns a reference to this object so that method calls can be chained together. * @see ExecutionClass */ Builder executionClass(String executionClass); /** *

              * Indicates whether the job is run with a standard or flexible execution class. The standard execution-class is * ideal for time-sensitive workloads that require fast job startup and dedicated resources. *

              *

              * The flexible execution class is appropriate for time-insensitive jobs whose start and completion times may * vary. *

              *

              * Only jobs with Glue version 3.0 and above and command type glueetl will be allowed to set * ExecutionClass to FLEX. The flexible execution class is available for Spark jobs. *

              * * @param executionClass * Indicates whether the job is run with a standard or flexible execution class. The standard * execution-class is ideal for time-sensitive workloads that require fast job startup and dedicated * resources.

              *

              * The flexible execution class is appropriate for time-insensitive jobs whose start and completion times * may vary. *

              *

              * Only jobs with Glue version 3.0 and above and command type glueetl will be allowed to set * ExecutionClass to FLEX. The flexible execution class is available for Spark * jobs. * @see ExecutionClass * @return Returns a reference to this object so that method calls can be chained together. * @see ExecutionClass */ Builder executionClass(ExecutionClass executionClass); /** *

              * The details for a source control configuration for a job, allowing synchronization of job artifacts to or * from a remote repository. *

              * * @param sourceControlDetails * The details for a source control configuration for a job, allowing synchronization of job artifacts to * or from a remote repository. * @return Returns a reference to this object so that method calls can be chained together. */ Builder sourceControlDetails(SourceControlDetails sourceControlDetails); /** *

              * The details for a source control configuration for a job, allowing synchronization of job artifacts to or * from a remote repository. *

              * This is a convenience method that creates an instance of the {@link SourceControlDetails.Builder} avoiding * the need to create one manually via {@link SourceControlDetails#builder()}. * *

              * When the {@link Consumer} completes, {@link SourceControlDetails.Builder#build()} is called immediately and * its result is passed to {@link #sourceControlDetails(SourceControlDetails)}. * * @param sourceControlDetails * a consumer that will call methods on {@link SourceControlDetails.Builder} * @return Returns a reference to this object so that method calls can be chained together. * @see #sourceControlDetails(SourceControlDetails) */ default Builder sourceControlDetails(Consumer sourceControlDetails) { return sourceControlDetails(SourceControlDetails.builder().applyMutation(sourceControlDetails).build()); } /** *

              * This field specifies a day of the week and hour for a maintenance window for streaming jobs. Glue * periodically performs maintenance activities. During these maintenance windows, Glue will need to restart * your streaming jobs. *

              *

              * Glue will restart the job within 3 hours of the specified maintenance window. For instance, if you set up the * maintenance window for Monday at 10:00AM GMT, your jobs will be restarted between 10:00AM GMT to 1:00PM GMT. *

              * * @param maintenanceWindow * This field specifies a day of the week and hour for a maintenance window for streaming jobs. Glue * periodically performs maintenance activities. During these maintenance windows, Glue will need to * restart your streaming jobs.

              *

              * Glue will restart the job within 3 hours of the specified maintenance window. For instance, if you set * up the maintenance window for Monday at 10:00AM GMT, your jobs will be restarted between 10:00AM GMT * to 1:00PM GMT. * @return Returns a reference to this object so that method calls can be chained together. */ Builder maintenanceWindow(String maintenanceWindow); } static final class BuilderImpl implements Builder { private String jobMode; private Boolean jobRunQueuingEnabled; private String description; private String logUri; private String role; private ExecutionProperty executionProperty; private JobCommand command; private Map defaultArguments = DefaultSdkAutoConstructMap.getInstance(); private Map nonOverridableArguments = DefaultSdkAutoConstructMap.getInstance(); private ConnectionsList connections; private Integer maxRetries; private Integer allocatedCapacity; private Integer timeout; private Double maxCapacity; private String workerType; private Integer numberOfWorkers; private String securityConfiguration; private NotificationProperty notificationProperty; private String glueVersion; private Map codeGenConfigurationNodes = DefaultSdkAutoConstructMap.getInstance(); private String executionClass; private SourceControlDetails sourceControlDetails; private String maintenanceWindow; private BuilderImpl() { } private BuilderImpl(JobUpdate model) { jobMode(model.jobMode); jobRunQueuingEnabled(model.jobRunQueuingEnabled); description(model.description); logUri(model.logUri); role(model.role); executionProperty(model.executionProperty); command(model.command); defaultArguments(model.defaultArguments); nonOverridableArguments(model.nonOverridableArguments); connections(model.connections); maxRetries(model.maxRetries); allocatedCapacity(model.allocatedCapacity); timeout(model.timeout); maxCapacity(model.maxCapacity); workerType(model.workerType); numberOfWorkers(model.numberOfWorkers); securityConfiguration(model.securityConfiguration); notificationProperty(model.notificationProperty); glueVersion(model.glueVersion); codeGenConfigurationNodes(model.codeGenConfigurationNodes); executionClass(model.executionClass); sourceControlDetails(model.sourceControlDetails); maintenanceWindow(model.maintenanceWindow); } public final String getJobMode() { return jobMode; } public final void setJobMode(String jobMode) { this.jobMode = jobMode; } @Override public final Builder jobMode(String jobMode) { this.jobMode = jobMode; return this; } @Override public final Builder jobMode(JobMode jobMode) { this.jobMode(jobMode == null ? 
null : jobMode.toString()); return this; } public final Boolean getJobRunQueuingEnabled() { return jobRunQueuingEnabled; } public final void setJobRunQueuingEnabled(Boolean jobRunQueuingEnabled) { this.jobRunQueuingEnabled = jobRunQueuingEnabled; } @Override public final Builder jobRunQueuingEnabled(Boolean jobRunQueuingEnabled) { this.jobRunQueuingEnabled = jobRunQueuingEnabled; return this; } public final String getDescription() { return description; } public final void setDescription(String description) { this.description = description; } @Override public final Builder description(String description) { this.description = description; return this; } public final String getLogUri() { return logUri; } public final void setLogUri(String logUri) { this.logUri = logUri; } @Override public final Builder logUri(String logUri) { this.logUri = logUri; return this; } public final String getRole() { return role; } public final void setRole(String role) { this.role = role; } @Override public final Builder role(String role) { this.role = role; return this; } public final ExecutionProperty.Builder getExecutionProperty() { return executionProperty != null ? executionProperty.toBuilder() : null; } public final void setExecutionProperty(ExecutionProperty.BuilderImpl executionProperty) { this.executionProperty = executionProperty != null ? executionProperty.build() : null; } @Override public final Builder executionProperty(ExecutionProperty executionProperty) { this.executionProperty = executionProperty; return this; } public final JobCommand.Builder getCommand() { return command != null ? command.toBuilder() : null; } public final void setCommand(JobCommand.BuilderImpl command) { this.command = command != null ? command.build() : null; } @Override public final Builder command(JobCommand command) { this.command = command; return this; } public final Map getDefaultArguments() { if (defaultArguments instanceof SdkAutoConstructMap) { return null; } return defaultArguments; } public final void setDefaultArguments(Map defaultArguments) { this.defaultArguments = GenericMapCopier.copy(defaultArguments); } @Override public final Builder defaultArguments(Map defaultArguments) { this.defaultArguments = GenericMapCopier.copy(defaultArguments); return this; } public final Map getNonOverridableArguments() { if (nonOverridableArguments instanceof SdkAutoConstructMap) { return null; } return nonOverridableArguments; } public final void setNonOverridableArguments(Map nonOverridableArguments) { this.nonOverridableArguments = GenericMapCopier.copy(nonOverridableArguments); } @Override public final Builder nonOverridableArguments(Map nonOverridableArguments) { this.nonOverridableArguments = GenericMapCopier.copy(nonOverridableArguments); return this; } public final ConnectionsList.Builder getConnections() { return connections != null ? connections.toBuilder() : null; } public final void setConnections(ConnectionsList.BuilderImpl connections) { this.connections = connections != null ? 
connections.build() : null; } @Override public final Builder connections(ConnectionsList connections) { this.connections = connections; return this; } public final Integer getMaxRetries() { return maxRetries; } public final void setMaxRetries(Integer maxRetries) { this.maxRetries = maxRetries; } @Override public final Builder maxRetries(Integer maxRetries) { this.maxRetries = maxRetries; return this; } @Deprecated public final Integer getAllocatedCapacity() { return allocatedCapacity; } @Deprecated public final void setAllocatedCapacity(Integer allocatedCapacity) { this.allocatedCapacity = allocatedCapacity; } @Override @Deprecated public final Builder allocatedCapacity(Integer allocatedCapacity) { this.allocatedCapacity = allocatedCapacity; return this; } public final Integer getTimeout() { return timeout; } public final void setTimeout(Integer timeout) { this.timeout = timeout; } @Override public final Builder timeout(Integer timeout) { this.timeout = timeout; return this; } public final Double getMaxCapacity() { return maxCapacity; } public final void setMaxCapacity(Double maxCapacity) { this.maxCapacity = maxCapacity; } @Override public final Builder maxCapacity(Double maxCapacity) { this.maxCapacity = maxCapacity; return this; } public final String getWorkerType() { return workerType; } public final void setWorkerType(String workerType) { this.workerType = workerType; } @Override public final Builder workerType(String workerType) { this.workerType = workerType; return this; } @Override public final Builder workerType(WorkerType workerType) { this.workerType(workerType == null ? null : workerType.toString()); return this; } public final Integer getNumberOfWorkers() { return numberOfWorkers; } public final void setNumberOfWorkers(Integer numberOfWorkers) { this.numberOfWorkers = numberOfWorkers; } @Override public final Builder numberOfWorkers(Integer numberOfWorkers) { this.numberOfWorkers = numberOfWorkers; return this; } public final String getSecurityConfiguration() { return securityConfiguration; } public final void setSecurityConfiguration(String securityConfiguration) { this.securityConfiguration = securityConfiguration; } @Override public final Builder securityConfiguration(String securityConfiguration) { this.securityConfiguration = securityConfiguration; return this; } public final NotificationProperty.Builder getNotificationProperty() { return notificationProperty != null ? notificationProperty.toBuilder() : null; } public final void setNotificationProperty(NotificationProperty.BuilderImpl notificationProperty) { this.notificationProperty = notificationProperty != null ? 
notificationProperty.build() : null; } @Override public final Builder notificationProperty(NotificationProperty notificationProperty) { this.notificationProperty = notificationProperty; return this; } public final String getGlueVersion() { return glueVersion; } public final void setGlueVersion(String glueVersion) { this.glueVersion = glueVersion; } @Override public final Builder glueVersion(String glueVersion) { this.glueVersion = glueVersion; return this; } public final Map getCodeGenConfigurationNodes() { Map result = CodeGenConfigurationNodesCopier .copyToBuilder(this.codeGenConfigurationNodes); if (result instanceof SdkAutoConstructMap) { return null; } return result; } public final void setCodeGenConfigurationNodes(Map codeGenConfigurationNodes) { this.codeGenConfigurationNodes = CodeGenConfigurationNodesCopier.copyFromBuilder(codeGenConfigurationNodes); } @Override public final Builder codeGenConfigurationNodes(Map codeGenConfigurationNodes) { this.codeGenConfigurationNodes = CodeGenConfigurationNodesCopier.copy(codeGenConfigurationNodes); return this; } public final String getExecutionClass() { return executionClass; } public final void setExecutionClass(String executionClass) { this.executionClass = executionClass; } @Override public final Builder executionClass(String executionClass) { this.executionClass = executionClass; return this; } @Override public final Builder executionClass(ExecutionClass executionClass) { this.executionClass(executionClass == null ? null : executionClass.toString()); return this; } public final SourceControlDetails.Builder getSourceControlDetails() { return sourceControlDetails != null ? sourceControlDetails.toBuilder() : null; } public final void setSourceControlDetails(SourceControlDetails.BuilderImpl sourceControlDetails) { this.sourceControlDetails = sourceControlDetails != null ? sourceControlDetails.build() : null; } @Override public final Builder sourceControlDetails(SourceControlDetails sourceControlDetails) { this.sourceControlDetails = sourceControlDetails; return this; } public final String getMaintenanceWindow() { return maintenanceWindow; } public final void setMaintenanceWindow(String maintenanceWindow) { this.maintenanceWindow = maintenanceWindow; } @Override public final Builder maintenanceWindow(String maintenanceWindow) { this.maintenanceWindow = maintenanceWindow; return this; } @Override public JobUpdate build() { return new JobUpdate(this); } @Override public List> sdkFields() { return SDK_FIELDS; } @Override public Map> sdkFieldNameToField() { return SDK_NAME_TO_FIELD; } } }




