/*
* Copyright 2019-2024 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.sagemaker.model;
import java.io.Serializable;
import javax.annotation.Generated;
import com.amazonaws.AmazonWebServiceRequest;
/**
*
 * @see AWS API Documentation
*/
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class CreateTransformJobRequest extends com.amazonaws.AmazonWebServiceRequest implements Serializable, Cloneable {
/**
*
* The name of the transform job. The name must be unique within an Amazon Web Services Region in an Amazon Web
* Services account.
*
*/
private String transformJobName;
/**
*
 * The name of the model that you want to use for the transform job. <code>ModelName</code> must be the name of an
 * existing Amazon SageMaker model within an Amazon Web Services Region in an Amazon Web Services account.
*
*/
private String modelName;
/**
*
 * The maximum number of parallel requests that can be sent to each instance in a transform job. If
 * <code>MaxConcurrentTransforms</code> is set to <code>0</code> or left unset, Amazon SageMaker checks the optional
 * execution-parameters to determine the settings for your chosen algorithm. If the execution-parameters endpoint is
 * not enabled, the default value is <code>1</code>. For more information on execution-parameters, see How
 * Containers Serve Requests. For built-in algorithms, you don't need to set a value for
 * <code>MaxConcurrentTransforms</code>.
*
*/
private Integer maxConcurrentTransforms;
/**
*
* Configures the timeout and maximum number of retries for processing a transform job invocation.
*
*/
private ModelClientConfig modelClientConfig;
/**
*
 * The maximum allowed size of the payload, in MB. A payload is the data portion of a record (without
 * metadata). The value in <code>MaxPayloadInMB</code> must be greater than, or equal to, the size of a single
 * record. To estimate the size of a record in MB, divide the size of your dataset by the number of records. To
 * ensure that the records fit within the maximum payload size, we recommend using a slightly larger value. The
 * default value is <code>6</code> MB.
 *
 * The value of <code>MaxPayloadInMB</code> cannot be greater than 100 MB. If you specify the
 * <code>MaxConcurrentTransforms</code> parameter, the value of
 * <code>(MaxConcurrentTransforms * MaxPayloadInMB)</code> also cannot exceed 100 MB.
 *
 * For cases where the payload might be arbitrarily large and is transmitted using HTTP chunked encoding, set the
 * value to <code>0</code>. This feature works only in supported algorithms. Currently, Amazon SageMaker built-in
 * algorithms do not support HTTP chunked encoding.
*
*/
private Integer maxPayloadInMB;
/**
*
* Specifies the number of records to include in a mini-batch for an HTTP inference request. A record is
* a single unit of input data that inference can be made on. For example, a single line in a CSV file is a record.
*
*
 * To enable the batch strategy, you must set the <code>SplitType</code> property to <code>Line</code>,
 * <code>RecordIO</code>, or <code>TFRecord</code>.
 *
 * To use only one record when making an HTTP invocation request to a container, set <code>BatchStrategy</code> to
 * <code>SingleRecord</code> and <code>SplitType</code> to <code>Line</code>.
 *
 * To fit as many records in a mini-batch as can fit within the <code>MaxPayloadInMB</code> limit, set
 * <code>BatchStrategy</code> to <code>MultiRecord</code> and <code>SplitType</code> to <code>Line</code>.
*
*/
private String batchStrategy;
/**
*
 * The environment variables to set in the Docker container. We support up to 16 key-value entries in the map.
*
*/
private java.util.Map<String, String> environment;
/**
*
* Describes the input source and the way the transform job consumes it.
*
*/
private TransformInput transformInput;
/**
*
* Describes the results of the transform job.
*
*/
private TransformOutput transformOutput;
/**
*
* Configuration to control how SageMaker captures inference data.
*
*/
private BatchDataCaptureConfig dataCaptureConfig;
/**
*
* Describes the resources, including ML instance types and ML instance count, to use for the transform job.
*
*/
private TransformResources transformResources;
/**
*
* The data structure used to specify the data to be used for inference in a batch transform job and to associate
* the data that is relevant to the prediction results in the output. The input filter provided allows you to
* exclude input data that is not needed for inference in a batch transform job. The output filter provided allows
* you to include input data relevant to interpreting the predictions in the output from the job. For more
* information, see Associate Prediction
* Results with their Corresponding Input Records.
*
*/
private DataProcessing dataProcessing;
/**
*
 * (Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web
 * Services Billing and Cost Management User Guide.
*
*/
private java.util.List<Tag> tags;
private ExperimentConfig experimentConfig;
/**
*
* The name of the transform job. The name must be unique within an Amazon Web Services Region in an Amazon Web
* Services account.
*
*
* @param transformJobName
* The name of the transform job. The name must be unique within an Amazon Web Services Region in an Amazon
* Web Services account.
*/
public void setTransformJobName(String transformJobName) {
this.transformJobName = transformJobName;
}
/**
*
* The name of the transform job. The name must be unique within an Amazon Web Services Region in an Amazon Web
* Services account.
*
*
* @return The name of the transform job. The name must be unique within an Amazon Web Services Region in an Amazon
* Web Services account.
*/
public String getTransformJobName() {
return this.transformJobName;
}
/**
*
* The name of the transform job. The name must be unique within an Amazon Web Services Region in an Amazon Web
* Services account.
*
*
* @param transformJobName
* The name of the transform job. The name must be unique within an Amazon Web Services Region in an Amazon
* Web Services account.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public CreateTransformJobRequest withTransformJobName(String transformJobName) {
setTransformJobName(transformJobName);
return this;
}
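/*
 * Usage note (editorial sketch, not generated SDK code): every with* method on
 * this class returns the request itself, so the accessors below compose
 * fluently. Names are illustrative placeholders.
 *
 *   CreateTransformJobRequest request = new CreateTransformJobRequest()
 *           .withTransformJobName("my-transform-job")
 *           .withModelName("my-existing-model");
 */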
/**
*
 * The name of the model that you want to use for the transform job. <code>ModelName</code> must be the name of an
 * existing Amazon SageMaker model within an Amazon Web Services Region in an Amazon Web Services account.
 *
 *
 * @param modelName
 * The name of the model that you want to use for the transform job. <code>ModelName</code> must be the name
 * of an existing Amazon SageMaker model within an Amazon Web Services Region in an Amazon Web Services
 * account.
*/
public void setModelName(String modelName) {
this.modelName = modelName;
}
/**
*
 * The name of the model that you want to use for the transform job. <code>ModelName</code> must be the name of an
 * existing Amazon SageMaker model within an Amazon Web Services Region in an Amazon Web Services account.
 *
 *
 * @return The name of the model that you want to use for the transform job. <code>ModelName</code> must be the name
 * of an existing Amazon SageMaker model within an Amazon Web Services Region in an Amazon Web Services
 * account.
*/
public String getModelName() {
return this.modelName;
}
/**
*
 * The name of the model that you want to use for the transform job. <code>ModelName</code> must be the name of an
 * existing Amazon SageMaker model within an Amazon Web Services Region in an Amazon Web Services account.
 *
 *
 * @param modelName
 * The name of the model that you want to use for the transform job. <code>ModelName</code> must be the name
 * of an existing Amazon SageMaker model within an Amazon Web Services Region in an Amazon Web Services
 * account.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public CreateTransformJobRequest withModelName(String modelName) {
setModelName(modelName);
return this;
}
/**
*
 * The maximum number of parallel requests that can be sent to each instance in a transform job. If
 * <code>MaxConcurrentTransforms</code> is set to <code>0</code> or left unset, Amazon SageMaker checks the optional
 * execution-parameters to determine the settings for your chosen algorithm. If the execution-parameters endpoint is
 * not enabled, the default value is <code>1</code>. For more information on execution-parameters, see How
 * Containers Serve Requests. For built-in algorithms, you don't need to set a value for
 * <code>MaxConcurrentTransforms</code>.
 *
 *
 * @param maxConcurrentTransforms
 * The maximum number of parallel requests that can be sent to each instance in a transform job. If
 * <code>MaxConcurrentTransforms</code> is set to <code>0</code> or left unset, Amazon SageMaker checks the
 * optional execution-parameters to determine the settings for your chosen algorithm. If the
 * execution-parameters endpoint is not enabled, the default value is <code>1</code>. For more information on
 * execution-parameters, see How Containers Serve Requests. For built-in algorithms, you don't need to set a
 * value for <code>MaxConcurrentTransforms</code>.
*/
public void setMaxConcurrentTransforms(Integer maxConcurrentTransforms) {
this.maxConcurrentTransforms = maxConcurrentTransforms;
}
/**
*
 * The maximum number of parallel requests that can be sent to each instance in a transform job. If
 * <code>MaxConcurrentTransforms</code> is set to <code>0</code> or left unset, Amazon SageMaker checks the optional
 * execution-parameters to determine the settings for your chosen algorithm. If the execution-parameters endpoint is
 * not enabled, the default value is <code>1</code>. For more information on execution-parameters, see How
 * Containers Serve Requests. For built-in algorithms, you don't need to set a value for
 * <code>MaxConcurrentTransforms</code>.
 *
 *
 * @return The maximum number of parallel requests that can be sent to each instance in a transform job. If
 * <code>MaxConcurrentTransforms</code> is set to <code>0</code> or left unset, Amazon SageMaker checks the
 * optional execution-parameters to determine the settings for your chosen algorithm. If the
 * execution-parameters endpoint is not enabled, the default value is <code>1</code>. For more information
 * on execution-parameters, see How Containers Serve Requests. For built-in algorithms, you don't need to
 * set a value for <code>MaxConcurrentTransforms</code>.
*/
public Integer getMaxConcurrentTransforms() {
return this.maxConcurrentTransforms;
}
/**
*
 * The maximum number of parallel requests that can be sent to each instance in a transform job. If
 * <code>MaxConcurrentTransforms</code> is set to <code>0</code> or left unset, Amazon SageMaker checks the optional
 * execution-parameters to determine the settings for your chosen algorithm. If the execution-parameters endpoint is
 * not enabled, the default value is <code>1</code>. For more information on execution-parameters, see How
 * Containers Serve Requests. For built-in algorithms, you don't need to set a value for
 * <code>MaxConcurrentTransforms</code>.
 *
 *
 * @param maxConcurrentTransforms
 * The maximum number of parallel requests that can be sent to each instance in a transform job. If
 * <code>MaxConcurrentTransforms</code> is set to <code>0</code> or left unset, Amazon SageMaker checks the
 * optional execution-parameters to determine the settings for your chosen algorithm. If the
 * execution-parameters endpoint is not enabled, the default value is <code>1</code>. For more information on
 * execution-parameters, see How Containers Serve Requests. For built-in algorithms, you don't need to set a
 * value for <code>MaxConcurrentTransforms</code>.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public CreateTransformJobRequest withMaxConcurrentTransforms(Integer maxConcurrentTransforms) {
setMaxConcurrentTransforms(maxConcurrentTransforms);
return this;
}
/**
*
* Configures the timeout and maximum number of retries for processing a transform job invocation.
*
*
* @param modelClientConfig
* Configures the timeout and maximum number of retries for processing a transform job invocation.
*/
public void setModelClientConfig(ModelClientConfig modelClientConfig) {
this.modelClientConfig = modelClientConfig;
}
/**
*
* Configures the timeout and maximum number of retries for processing a transform job invocation.
*
*
* @return Configures the timeout and maximum number of retries for processing a transform job invocation.
*/
public ModelClientConfig getModelClientConfig() {
return this.modelClientConfig;
}
/**
*
* Configures the timeout and maximum number of retries for processing a transform job invocation.
*
*
* @param modelClientConfig
* Configures the timeout and maximum number of retries for processing a transform job invocation.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public CreateTransformJobRequest withModelClientConfig(ModelClientConfig modelClientConfig) {
setModelClientConfig(modelClientConfig);
return this;
}
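/*
 * Example (illustrative sketch): ModelClientConfig controls the per-invocation
 * timeout and retry behavior described above. Assuming the standard fluent
 * setters on that model class, a conservative configuration might look like:
 *
 *   ModelClientConfig clientConfig = new ModelClientConfig()
 *           .withInvocationsTimeoutInSeconds(600)
 *           .withInvocationsMaxRetries(1);
 *   request.withModelClientConfig(clientConfig);
 */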
/**
*
 * The maximum allowed size of the payload, in MB. A payload is the data portion of a record (without
 * metadata). The value in <code>MaxPayloadInMB</code> must be greater than, or equal to, the size of a single
 * record. To estimate the size of a record in MB, divide the size of your dataset by the number of records. To
 * ensure that the records fit within the maximum payload size, we recommend using a slightly larger value. The
 * default value is <code>6</code> MB.
 *
 * The value of <code>MaxPayloadInMB</code> cannot be greater than 100 MB. If you specify the
 * <code>MaxConcurrentTransforms</code> parameter, the value of
 * <code>(MaxConcurrentTransforms * MaxPayloadInMB)</code> also cannot exceed 100 MB.
 *
 * For cases where the payload might be arbitrarily large and is transmitted using HTTP chunked encoding, set the
 * value to <code>0</code>. This feature works only in supported algorithms. Currently, Amazon SageMaker built-in
 * algorithms do not support HTTP chunked encoding.
 *
 *
 * @param maxPayloadInMB
 * The maximum allowed size of the payload, in MB. A payload is the data portion of a record (without
 * metadata). The value in <code>MaxPayloadInMB</code> must be greater than, or equal to, the size of a
 * single record. To estimate the size of a record in MB, divide the size of your dataset by the number of
 * records. To ensure that the records fit within the maximum payload size, we recommend using a slightly
 * larger value. The default value is <code>6</code> MB.
 *
 * The value of <code>MaxPayloadInMB</code> cannot be greater than 100 MB. If you specify the
 * <code>MaxConcurrentTransforms</code> parameter, the value of
 * <code>(MaxConcurrentTransforms * MaxPayloadInMB)</code> also cannot exceed 100 MB.
 *
 * For cases where the payload might be arbitrarily large and is transmitted using HTTP chunked encoding, set
 * the value to <code>0</code>. This feature works only in supported algorithms. Currently, Amazon SageMaker
 * built-in algorithms do not support HTTP chunked encoding.
*/
public void setMaxPayloadInMB(Integer maxPayloadInMB) {
this.maxPayloadInMB = maxPayloadInMB;
}
/**
*
 * The maximum allowed size of the payload, in MB. A payload is the data portion of a record (without
 * metadata). The value in <code>MaxPayloadInMB</code> must be greater than, or equal to, the size of a single
 * record. To estimate the size of a record in MB, divide the size of your dataset by the number of records. To
 * ensure that the records fit within the maximum payload size, we recommend using a slightly larger value. The
 * default value is <code>6</code> MB.
 *
 * The value of <code>MaxPayloadInMB</code> cannot be greater than 100 MB. If you specify the
 * <code>MaxConcurrentTransforms</code> parameter, the value of
 * <code>(MaxConcurrentTransforms * MaxPayloadInMB)</code> also cannot exceed 100 MB.
 *
 * For cases where the payload might be arbitrarily large and is transmitted using HTTP chunked encoding, set the
 * value to <code>0</code>. This feature works only in supported algorithms. Currently, Amazon SageMaker built-in
 * algorithms do not support HTTP chunked encoding.
 *
 *
 * @return The maximum allowed size of the payload, in MB. A payload is the data portion of a record (without
 * metadata). The value in <code>MaxPayloadInMB</code> must be greater than, or equal to, the size of a
 * single record. To estimate the size of a record in MB, divide the size of your dataset by the number of
 * records. To ensure that the records fit within the maximum payload size, we recommend using a slightly
 * larger value. The default value is <code>6</code> MB.
 *
 * The value of <code>MaxPayloadInMB</code> cannot be greater than 100 MB. If you specify the
 * <code>MaxConcurrentTransforms</code> parameter, the value of
 * <code>(MaxConcurrentTransforms * MaxPayloadInMB)</code> also cannot exceed 100 MB.
 *
 * For cases where the payload might be arbitrarily large and is transmitted using HTTP chunked encoding,
 * set the value to <code>0</code>. This feature works only in supported algorithms. Currently, Amazon
 * SageMaker built-in algorithms do not support HTTP chunked encoding.
*/
public Integer getMaxPayloadInMB() {
return this.maxPayloadInMB;
}
/**
*
 * The maximum allowed size of the payload, in MB. A payload is the data portion of a record (without
 * metadata). The value in <code>MaxPayloadInMB</code> must be greater than, or equal to, the size of a single
 * record. To estimate the size of a record in MB, divide the size of your dataset by the number of records. To
 * ensure that the records fit within the maximum payload size, we recommend using a slightly larger value. The
 * default value is <code>6</code> MB.
 *
 * The value of <code>MaxPayloadInMB</code> cannot be greater than 100 MB. If you specify the
 * <code>MaxConcurrentTransforms</code> parameter, the value of
 * <code>(MaxConcurrentTransforms * MaxPayloadInMB)</code> also cannot exceed 100 MB.
 *
 * For cases where the payload might be arbitrarily large and is transmitted using HTTP chunked encoding, set the
 * value to <code>0</code>. This feature works only in supported algorithms. Currently, Amazon SageMaker built-in
 * algorithms do not support HTTP chunked encoding.
 *
 *
 * @param maxPayloadInMB
 * The maximum allowed size of the payload, in MB. A payload is the data portion of a record (without
 * metadata). The value in <code>MaxPayloadInMB</code> must be greater than, or equal to, the size of a
 * single record. To estimate the size of a record in MB, divide the size of your dataset by the number of
 * records. To ensure that the records fit within the maximum payload size, we recommend using a slightly
 * larger value. The default value is <code>6</code> MB.
 *
 * The value of <code>MaxPayloadInMB</code> cannot be greater than 100 MB. If you specify the
 * <code>MaxConcurrentTransforms</code> parameter, the value of
 * <code>(MaxConcurrentTransforms * MaxPayloadInMB)</code> also cannot exceed 100 MB.
 *
 * For cases where the payload might be arbitrarily large and is transmitted using HTTP chunked encoding, set
 * the value to <code>0</code>. This feature works only in supported algorithms. Currently, Amazon SageMaker
 * built-in algorithms do not support HTTP chunked encoding.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public CreateTransformJobRequest withMaxPayloadInMB(Integer maxPayloadInMB) {
setMaxPayloadInMB(maxPayloadInMB);
return this;
}
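/*
 * Editorial sketch: the constraints documented above (MaxPayloadInMB <= 100 and
 * MaxConcurrentTransforms * MaxPayloadInMB <= 100) can be checked client-side
 * before submitting the request. The helper below is hypothetical, not part of
 * the SDK; it uses only the accessors defined in this class.
 *
 *   static void checkPayloadLimits(CreateTransformJobRequest r) {
 *       Integer concurrency = r.getMaxConcurrentTransforms();
 *       Integer payloadMB = r.getMaxPayloadInMB();
 *       if (payloadMB != null && payloadMB > 100) {
 *           throw new IllegalArgumentException("MaxPayloadInMB cannot exceed 100 MB");
 *       }
 *       if (concurrency != null && payloadMB != null && concurrency * payloadMB > 100) {
 *           throw new IllegalArgumentException(
 *                   "MaxConcurrentTransforms * MaxPayloadInMB cannot exceed 100 MB");
 *       }
 *   }
 */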
/**
*
* Specifies the number of records to include in a mini-batch for an HTTP inference request. A record is
* a single unit of input data that inference can be made on. For example, a single line in a CSV file is a record.
*
*
 * To enable the batch strategy, you must set the <code>SplitType</code> property to <code>Line</code>,
 * <code>RecordIO</code>, or <code>TFRecord</code>.
 *
 * To use only one record when making an HTTP invocation request to a container, set <code>BatchStrategy</code> to
 * <code>SingleRecord</code> and <code>SplitType</code> to <code>Line</code>.
 *
 * To fit as many records in a mini-batch as can fit within the <code>MaxPayloadInMB</code> limit, set
 * <code>BatchStrategy</code> to <code>MultiRecord</code> and <code>SplitType</code> to <code>Line</code>.
 *
 *
 * @param batchStrategy
 * Specifies the number of records to include in a mini-batch for an HTTP inference request. A record is a
 * single unit of input data that inference can be made on. For example, a single line in a CSV file is a
 * record.
 *
 * To enable the batch strategy, you must set the <code>SplitType</code> property to <code>Line</code>,
 * <code>RecordIO</code>, or <code>TFRecord</code>.
 *
 * To use only one record when making an HTTP invocation request to a container, set <code>BatchStrategy</code>
 * to <code>SingleRecord</code> and <code>SplitType</code> to <code>Line</code>.
 *
 * To fit as many records in a mini-batch as can fit within the <code>MaxPayloadInMB</code> limit, set
 * <code>BatchStrategy</code> to <code>MultiRecord</code> and <code>SplitType</code> to <code>Line</code>.
* @see BatchStrategy
*/
public void setBatchStrategy(String batchStrategy) {
this.batchStrategy = batchStrategy;
}
/**
*
* Specifies the number of records to include in a mini-batch for an HTTP inference request. A record is
* a single unit of input data that inference can be made on. For example, a single line in a CSV file is a record.
*
*
 * To enable the batch strategy, you must set the <code>SplitType</code> property to <code>Line</code>,
 * <code>RecordIO</code>, or <code>TFRecord</code>.
 *
 * To use only one record when making an HTTP invocation request to a container, set <code>BatchStrategy</code> to
 * <code>SingleRecord</code> and <code>SplitType</code> to <code>Line</code>.
 *
 * To fit as many records in a mini-batch as can fit within the <code>MaxPayloadInMB</code> limit, set
 * <code>BatchStrategy</code> to <code>MultiRecord</code> and <code>SplitType</code> to <code>Line</code>.
 *
 *
 * @return Specifies the number of records to include in a mini-batch for an HTTP inference request. A record is a
 * single unit of input data that inference can be made on. For example, a single line in a CSV file is a
 * record.
 *
 * To enable the batch strategy, you must set the <code>SplitType</code> property to <code>Line</code>,
 * <code>RecordIO</code>, or <code>TFRecord</code>.
 *
 * To use only one record when making an HTTP invocation request to a container, set <code>BatchStrategy</code>
 * to <code>SingleRecord</code> and <code>SplitType</code> to <code>Line</code>.
 *
 * To fit as many records in a mini-batch as can fit within the <code>MaxPayloadInMB</code> limit, set
 * <code>BatchStrategy</code> to <code>MultiRecord</code> and <code>SplitType</code> to <code>Line</code>.
* @see BatchStrategy
*/
public String getBatchStrategy() {
return this.batchStrategy;
}
/**
*
* Specifies the number of records to include in a mini-batch for an HTTP inference request. A record is
* a single unit of input data that inference can be made on. For example, a single line in a CSV file is a record.
*
*
 * To enable the batch strategy, you must set the <code>SplitType</code> property to <code>Line</code>,
 * <code>RecordIO</code>, or <code>TFRecord</code>.
 *
 * To use only one record when making an HTTP invocation request to a container, set <code>BatchStrategy</code> to
 * <code>SingleRecord</code> and <code>SplitType</code> to <code>Line</code>.
 *
 * To fit as many records in a mini-batch as can fit within the <code>MaxPayloadInMB</code> limit, set
 * <code>BatchStrategy</code> to <code>MultiRecord</code> and <code>SplitType</code> to <code>Line</code>.
 *
 *
 * @param batchStrategy
 * Specifies the number of records to include in a mini-batch for an HTTP inference request. A record is a
 * single unit of input data that inference can be made on. For example, a single line in a CSV file is a
 * record.
 *
 * To enable the batch strategy, you must set the <code>SplitType</code> property to <code>Line</code>,
 * <code>RecordIO</code>, or <code>TFRecord</code>.
 *
 * To use only one record when making an HTTP invocation request to a container, set <code>BatchStrategy</code>
 * to <code>SingleRecord</code> and <code>SplitType</code> to <code>Line</code>.
 *
 * To fit as many records in a mini-batch as can fit within the <code>MaxPayloadInMB</code> limit, set
 * <code>BatchStrategy</code> to <code>MultiRecord</code> and <code>SplitType</code> to <code>Line</code>.
* @return Returns a reference to this object so that method calls can be chained together.
* @see BatchStrategy
*/
public CreateTransformJobRequest withBatchStrategy(String batchStrategy) {
setBatchStrategy(batchStrategy);
return this;
}
/**
*
* Specifies the number of records to include in a mini-batch for an HTTP inference request. A record is
* a single unit of input data that inference can be made on. For example, a single line in a CSV file is a record.
*
*
 * To enable the batch strategy, you must set the <code>SplitType</code> property to <code>Line</code>,
 * <code>RecordIO</code>, or <code>TFRecord</code>.
 *
 * To use only one record when making an HTTP invocation request to a container, set <code>BatchStrategy</code> to
 * <code>SingleRecord</code> and <code>SplitType</code> to <code>Line</code>.
 *
 * To fit as many records in a mini-batch as can fit within the <code>MaxPayloadInMB</code> limit, set
 * <code>BatchStrategy</code> to <code>MultiRecord</code> and <code>SplitType</code> to <code>Line</code>.
 *
 *
 * @param batchStrategy
 * Specifies the number of records to include in a mini-batch for an HTTP inference request. A record is a
 * single unit of input data that inference can be made on. For example, a single line in a CSV file is a
 * record.
 *
 * To enable the batch strategy, you must set the <code>SplitType</code> property to <code>Line</code>,
 * <code>RecordIO</code>, or <code>TFRecord</code>.
 *
 * To use only one record when making an HTTP invocation request to a container, set <code>BatchStrategy</code>
 * to <code>SingleRecord</code> and <code>SplitType</code> to <code>Line</code>.
 *
 * To fit as many records in a mini-batch as can fit within the <code>MaxPayloadInMB</code> limit, set
 * <code>BatchStrategy</code> to <code>MultiRecord</code> and <code>SplitType</code> to <code>Line</code>.
* @return Returns a reference to this object so that method calls can be chained together.
* @see BatchStrategy
*/
public CreateTransformJobRequest withBatchStrategy(BatchStrategy batchStrategy) {
this.batchStrategy = batchStrategy.toString();
return this;
}
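/*
 * Usage sketch: the typed overload above avoids hard-coded strings. Using the
 * BatchStrategy enum constants MultiRecord and SingleRecord documented above,
 * the two calls below are equivalent:
 *
 *   request.withBatchStrategy(BatchStrategy.MultiRecord);
 *   request.withBatchStrategy("MultiRecord");
 */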
/**
*
 * The environment variables to set in the Docker container. We support up to 16 key-value entries in the map.
 *
 *
 * @return The environment variables to set in the Docker container. We support up to 16 key-value entries in
 * the map.
*/
public java.util.Map<String, String> getEnvironment() {
return environment;
}
/**
*
 * The environment variables to set in the Docker container. We support up to 16 key-value entries in the map.
 *
 *
 * @param environment
 * The environment variables to set in the Docker container. We support up to 16 key-value entries in
 * the map.
*/
public void setEnvironment(java.util.Map<String, String> environment) {
this.environment = environment;
}
/**
*
 * The environment variables to set in the Docker container. We support up to 16 key-value entries in the map.
 *
 *
 * @param environment
 * The environment variables to set in the Docker container. We support up to 16 key-value entries in
 * the map.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public CreateTransformJobRequest withEnvironment(java.util.Map<String, String> environment) {
setEnvironment(environment);
return this;
}
/**
* Add a single Environment entry
*
* @see CreateTransformJobRequest#withEnvironment
 * @return Returns a reference to this object so that method calls can be chained together.
*/
public CreateTransformJobRequest addEnvironmentEntry(String key, String value) {
if (null == this.environment) {
this.environment = new java.util.HashMap<String, String>();
}
if (this.environment.containsKey(key))
throw new IllegalArgumentException("Duplicated keys (" + key.toString() + ") are provided.");
this.environment.put(key, value);
return this;
}
/**
* Removes all the entries added into Environment.
*
* @return Returns a reference to this object so that method calls can be chained together.
*/
public CreateTransformJobRequest clearEnvironmentEntries() {
this.environment = null;
return this;
}
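/*
 * Usage sketch: addEnvironmentEntry rejects duplicate keys, and the service
 * accepts at most 16 entries (per the documentation above), so adding
 * variables one at a time keeps both constraints visible. Variable names and
 * values below are placeholders.
 *
 *   request.addEnvironmentEntry("LOG_LEVEL", "INFO")
 *          .addEnvironmentEntry("BATCH_SIZE", "64");
 *   // request.addEnvironmentEntry("LOG_LEVEL", "DEBUG");  // would throw IllegalArgumentException
 */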
/**
*
* Describes the input source and the way the transform job consumes it.
*
*
* @param transformInput
* Describes the input source and the way the transform job consumes it.
*/
public void setTransformInput(TransformInput transformInput) {
this.transformInput = transformInput;
}
/**
*
* Describes the input source and the way the transform job consumes it.
*
*
* @return Describes the input source and the way the transform job consumes it.
*/
public TransformInput getTransformInput() {
return this.transformInput;
}
/**
*
* Describes the input source and the way the transform job consumes it.
*
*
* @param transformInput
* Describes the input source and the way the transform job consumes it.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public CreateTransformJobRequest withTransformInput(TransformInput transformInput) {
setTransformInput(transformInput);
return this;
}
/**
*
* Describes the results of the transform job.
*
*
* @param transformOutput
* Describes the results of the transform job.
*/
public void setTransformOutput(TransformOutput transformOutput) {
this.transformOutput = transformOutput;
}
/**
*
* Describes the results of the transform job.
*
*
* @return Describes the results of the transform job.
*/
public TransformOutput getTransformOutput() {
return this.transformOutput;
}
/**
*
* Describes the results of the transform job.
*
*
* @param transformOutput
* Describes the results of the transform job.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public CreateTransformJobRequest withTransformOutput(TransformOutput transformOutput) {
setTransformOutput(transformOutput);
return this;
}
/**
*
* Configuration to control how SageMaker captures inference data.
*
*
* @param dataCaptureConfig
* Configuration to control how SageMaker captures inference data.
*/
public void setDataCaptureConfig(BatchDataCaptureConfig dataCaptureConfig) {
this.dataCaptureConfig = dataCaptureConfig;
}
/**
*
* Configuration to control how SageMaker captures inference data.
*
*
* @return Configuration to control how SageMaker captures inference data.
*/
public BatchDataCaptureConfig getDataCaptureConfig() {
return this.dataCaptureConfig;
}
/**
*
* Configuration to control how SageMaker captures inference data.
*
*
* @param dataCaptureConfig
* Configuration to control how SageMaker captures inference data.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public CreateTransformJobRequest withDataCaptureConfig(BatchDataCaptureConfig dataCaptureConfig) {
setDataCaptureConfig(dataCaptureConfig);
return this;
}
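/*
 * Usage sketch (hedged; assumes the BatchDataCaptureConfig model class exposes
 * fluent setters for its DestinationS3Uri and GenerateInferenceId members, and
 * the S3 URI is a placeholder):
 *
 *   request.withDataCaptureConfig(new BatchDataCaptureConfig()
 *           .withDestinationS3Uri("s3://my-bucket/capture/")
 *           .withGenerateInferenceId(true));
 */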
/**
*
* Describes the resources, including ML instance types and ML instance count, to use for the transform job.
*
*
* @param transformResources
* Describes the resources, including ML instance types and ML instance count, to use for the transform job.
*/
public void setTransformResources(TransformResources transformResources) {
this.transformResources = transformResources;
}
/**
*
* Describes the resources, including ML instance types and ML instance count, to use for the transform job.
*
*
* @return Describes the resources, including ML instance types and ML instance count, to use for the transform job.
*/
public TransformResources getTransformResources() {
return this.transformResources;
}
/**
*
* Describes the resources, including ML instance types and ML instance count, to use for the transform job.
*
*
* @param transformResources
* Describes the resources, including ML instance types and ML instance count, to use for the transform job.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public CreateTransformJobRequest withTransformResources(TransformResources transformResources) {
setTransformResources(transformResources);
return this;
}
/**
*
* The data structure used to specify the data to be used for inference in a batch transform job and to associate
* the data that is relevant to the prediction results in the output. The input filter provided allows you to
* exclude input data that is not needed for inference in a batch transform job. The output filter provided allows
* you to include input data relevant to interpreting the predictions in the output from the job. For more
* information, see Associate Prediction
* Results with their Corresponding Input Records.
*
*
* @param dataProcessing
* The data structure used to specify the data to be used for inference in a batch transform job and to
* associate the data that is relevant to the prediction results in the output. The input filter provided
* allows you to exclude input data that is not needed for inference in a batch transform job. The output
* filter provided allows you to include input data relevant to interpreting the predictions in the output
* from the job. For more information, see Associate
* Prediction Results with their Corresponding Input Records.
*/
public void setDataProcessing(DataProcessing dataProcessing) {
this.dataProcessing = dataProcessing;
}
/**
*
* The data structure used to specify the data to be used for inference in a batch transform job and to associate
* the data that is relevant to the prediction results in the output. The input filter provided allows you to
* exclude input data that is not needed for inference in a batch transform job. The output filter provided allows
* you to include input data relevant to interpreting the predictions in the output from the job. For more
* information, see Associate Prediction
* Results with their Corresponding Input Records.
*
*
* @return The data structure used to specify the data to be used for inference in a batch transform job and to
* associate the data that is relevant to the prediction results in the output. The input filter provided
* allows you to exclude input data that is not needed for inference in a batch transform job. The output
* filter provided allows you to include input data relevant to interpreting the predictions in the output
* from the job. For more information, see Associate
* Prediction Results with their Corresponding Input Records.
*/
public DataProcessing getDataProcessing() {
return this.dataProcessing;
}
/**
*
* The data structure used to specify the data to be used for inference in a batch transform job and to associate
* the data that is relevant to the prediction results in the output. The input filter provided allows you to
* exclude input data that is not needed for inference in a batch transform job. The output filter provided allows
* you to include input data relevant to interpreting the predictions in the output from the job. For more
* information, see Associate Prediction
* Results with their Corresponding Input Records.
*
*
* @param dataProcessing
* The data structure used to specify the data to be used for inference in a batch transform job and to
* associate the data that is relevant to the prediction results in the output. The input filter provided
* allows you to exclude input data that is not needed for inference in a batch transform job. The output
* filter provided allows you to include input data relevant to interpreting the predictions in the output
* from the job. For more information, see Associate
* Prediction Results with their Corresponding Input Records.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public CreateTransformJobRequest withDataProcessing(DataProcessing dataProcessing) {
setDataProcessing(dataProcessing);
return this;
}
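/*
 * Usage sketch (hedged): DataProcessing carries the input/output filters and
 * join source described above; the filter expressions below are illustrative
 * JSONPath values, not requirements.
 *
 *   request.withDataProcessing(new DataProcessing()
 *           .withInputFilter("$.features")
 *           .withOutputFilter("$['id','SageMakerOutput']")
 *           .withJoinSource("Input"));
 */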
/**
*
 * (Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web
 * Services Billing and Cost Management User Guide.
*
*
* @return (Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide.
*/
public java.util.List<Tag> getTags() {
return tags;
}
/**
*
 * (Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web
 * Services Billing and Cost Management User Guide.
*
*
* @param tags
* (Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide.
*/
public void setTags(java.util.Collection<Tag> tags) {
if (tags == null) {
this.tags = null;
return;
}
this.tags = new java.util.ArrayList<Tag>(tags);
}
/**
*
 * (Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web
 * Services Billing and Cost Management User Guide.
*
*
* NOTE: This method appends the values to the existing list (if any). Use
* {@link #setTags(java.util.Collection)} or {@link #withTags(java.util.Collection)} if you want to override the
* existing values.
*
*
* @param tags
* (Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public CreateTransformJobRequest withTags(Tag... tags) {
if (this.tags == null) {
setTags(new java.util.ArrayList<Tag>(tags.length));
}
for (Tag ele : tags) {
this.tags.add(ele);
}
return this;
}
/**
*
 * (Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web
 * Services Billing and Cost Management User Guide.
*
*
* @param tags
* (Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public CreateTransformJobRequest withTags(java.util.Collection<Tag> tags) {
setTags(tags);
return this;
}
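/*
 * Usage sketch (Tag is the SDK model class in this package, with withKey and
 * withValue setters; the key-value pairs below are placeholders):
 *
 *   request.withTags(new Tag().withKey("team").withValue("ml-platform"),
 *           new Tag().withKey("cost-center").withValue("1234"));
 */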
/**
* @param experimentConfig
*/
public void setExperimentConfig(ExperimentConfig experimentConfig) {
this.experimentConfig = experimentConfig;
}
/**
* @return
*/
public ExperimentConfig getExperimentConfig() {
return this.experimentConfig;
}
/**
* @param experimentConfig
* @return Returns a reference to this object so that method calls can be chained together.
*/
public CreateTransformJobRequest withExperimentConfig(ExperimentConfig experimentConfig) {
setExperimentConfig(experimentConfig);
return this;
}
/**
* Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be
* redacted from this string using a placeholder value.
*
* @return A string representation of this object.
*
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("{");
if (getTransformJobName() != null)
sb.append("TransformJobName: ").append(getTransformJobName()).append(",");
if (getModelName() != null)
sb.append("ModelName: ").append(getModelName()).append(",");
if (getMaxConcurrentTransforms() != null)
sb.append("MaxConcurrentTransforms: ").append(getMaxConcurrentTransforms()).append(",");
if (getModelClientConfig() != null)
sb.append("ModelClientConfig: ").append(getModelClientConfig()).append(",");
if (getMaxPayloadInMB() != null)
sb.append("MaxPayloadInMB: ").append(getMaxPayloadInMB()).append(",");
if (getBatchStrategy() != null)
sb.append("BatchStrategy: ").append(getBatchStrategy()).append(",");
if (getEnvironment() != null)
sb.append("Environment: ").append(getEnvironment()).append(",");
if (getTransformInput() != null)
sb.append("TransformInput: ").append(getTransformInput()).append(",");
if (getTransformOutput() != null)
sb.append("TransformOutput: ").append(getTransformOutput()).append(",");
if (getDataCaptureConfig() != null)
sb.append("DataCaptureConfig: ").append(getDataCaptureConfig()).append(",");
if (getTransformResources() != null)
sb.append("TransformResources: ").append(getTransformResources()).append(",");
if (getDataProcessing() != null)
sb.append("DataProcessing: ").append(getDataProcessing()).append(",");
if (getTags() != null)
sb.append("Tags: ").append(getTags()).append(",");
if (getExperimentConfig() != null)
sb.append("ExperimentConfig: ").append(getExperimentConfig());
sb.append("}");
return sb.toString();
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (obj instanceof CreateTransformJobRequest == false)
return false;
CreateTransformJobRequest other = (CreateTransformJobRequest) obj;
if (other.getTransformJobName() == null ^ this.getTransformJobName() == null)
return false;
if (other.getTransformJobName() != null && other.getTransformJobName().equals(this.getTransformJobName()) == false)
return false;
if (other.getModelName() == null ^ this.getModelName() == null)
return false;
if (other.getModelName() != null && other.getModelName().equals(this.getModelName()) == false)
return false;
if (other.getMaxConcurrentTransforms() == null ^ this.getMaxConcurrentTransforms() == null)
return false;
if (other.getMaxConcurrentTransforms() != null && other.getMaxConcurrentTransforms().equals(this.getMaxConcurrentTransforms()) == false)
return false;
if (other.getModelClientConfig() == null ^ this.getModelClientConfig() == null)
return false;
if (other.getModelClientConfig() != null && other.getModelClientConfig().equals(this.getModelClientConfig()) == false)
return false;
if (other.getMaxPayloadInMB() == null ^ this.getMaxPayloadInMB() == null)
return false;
if (other.getMaxPayloadInMB() != null && other.getMaxPayloadInMB().equals(this.getMaxPayloadInMB()) == false)
return false;
if (other.getBatchStrategy() == null ^ this.getBatchStrategy() == null)
return false;
if (other.getBatchStrategy() != null && other.getBatchStrategy().equals(this.getBatchStrategy()) == false)
return false;
if (other.getEnvironment() == null ^ this.getEnvironment() == null)
return false;
if (other.getEnvironment() != null && other.getEnvironment().equals(this.getEnvironment()) == false)
return false;
if (other.getTransformInput() == null ^ this.getTransformInput() == null)
return false;
if (other.getTransformInput() != null && other.getTransformInput().equals(this.getTransformInput()) == false)
return false;
if (other.getTransformOutput() == null ^ this.getTransformOutput() == null)
return false;
if (other.getTransformOutput() != null && other.getTransformOutput().equals(this.getTransformOutput()) == false)
return false;
if (other.getDataCaptureConfig() == null ^ this.getDataCaptureConfig() == null)
return false;
if (other.getDataCaptureConfig() != null && other.getDataCaptureConfig().equals(this.getDataCaptureConfig()) == false)
return false;
if (other.getTransformResources() == null ^ this.getTransformResources() == null)
return false;
if (other.getTransformResources() != null && other.getTransformResources().equals(this.getTransformResources()) == false)
return false;
if (other.getDataProcessing() == null ^ this.getDataProcessing() == null)
return false;
if (other.getDataProcessing() != null && other.getDataProcessing().equals(this.getDataProcessing()) == false)
return false;
if (other.getTags() == null ^ this.getTags() == null)
return false;
if (other.getTags() != null && other.getTags().equals(this.getTags()) == false)
return false;
if (other.getExperimentConfig() == null ^ this.getExperimentConfig() == null)
return false;
if (other.getExperimentConfig() != null && other.getExperimentConfig().equals(this.getExperimentConfig()) == false)
return false;
return true;
}
@Override
public int hashCode() {
final int prime = 31;
int hashCode = 1;
hashCode = prime * hashCode + ((getTransformJobName() == null) ? 0 : getTransformJobName().hashCode());
hashCode = prime * hashCode + ((getModelName() == null) ? 0 : getModelName().hashCode());
hashCode = prime * hashCode + ((getMaxConcurrentTransforms() == null) ? 0 : getMaxConcurrentTransforms().hashCode());
hashCode = prime * hashCode + ((getModelClientConfig() == null) ? 0 : getModelClientConfig().hashCode());
hashCode = prime * hashCode + ((getMaxPayloadInMB() == null) ? 0 : getMaxPayloadInMB().hashCode());
hashCode = prime * hashCode + ((getBatchStrategy() == null) ? 0 : getBatchStrategy().hashCode());
hashCode = prime * hashCode + ((getEnvironment() == null) ? 0 : getEnvironment().hashCode());
hashCode = prime * hashCode + ((getTransformInput() == null) ? 0 : getTransformInput().hashCode());
hashCode = prime * hashCode + ((getTransformOutput() == null) ? 0 : getTransformOutput().hashCode());
hashCode = prime * hashCode + ((getDataCaptureConfig() == null) ? 0 : getDataCaptureConfig().hashCode());
hashCode = prime * hashCode + ((getTransformResources() == null) ? 0 : getTransformResources().hashCode());
hashCode = prime * hashCode + ((getDataProcessing() == null) ? 0 : getDataProcessing().hashCode());
hashCode = prime * hashCode + ((getTags() == null) ? 0 : getTags().hashCode());
hashCode = prime * hashCode + ((getExperimentConfig() == null) ? 0 : getExperimentConfig().hashCode());
return hashCode;
}
@Override
public CreateTransformJobRequest clone() {
return (CreateTransformJobRequest) super.clone();
}
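/*
 * End-to-end sketch (editorial, hedged): submitting this request with the v1
 * AmazonSageMaker client. Bucket, model, and job names are placeholders;
 * content type, split type, and instance type are illustrative choices.
 *
 *   AmazonSageMaker sm = AmazonSageMakerClientBuilder.defaultClient();
 *   CreateTransformJobRequest request = new CreateTransformJobRequest()
 *           .withTransformJobName("my-transform-job")
 *           .withModelName("my-existing-model")
 *           .withTransformInput(new TransformInput()
 *                   .withDataSource(new TransformDataSource()
 *                           .withS3DataSource(new TransformS3DataSource()
 *                                   .withS3DataType("S3Prefix")
 *                                   .withS3Uri("s3://my-bucket/input/")))
 *                   .withContentType("text/csv")
 *                   .withSplitType("Line"))
 *           .withTransformOutput(new TransformOutput()
 *                   .withS3OutputPath("s3://my-bucket/output/"))
 *           .withTransformResources(new TransformResources()
 *                   .withInstanceType("ml.m5.xlarge")
 *                   .withInstanceCount(1));
 *   CreateTransformJobResult result = sm.createTransformJob(request);
 */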
}