/*
* Copyright 2019-2024 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.forecast.model;
import java.io.Serializable;
import javax.annotation.Generated;
import com.amazonaws.AmazonWebServiceRequest;
/**
*
* @see <a href="http://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/CreateDatasetImportJob"
*      target="_top">AWS API Documentation</a>
*/
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class CreateDatasetImportJobRequest extends com.amazonaws.AmazonWebServiceRequest implements Serializable, Cloneable {
/**
*
* The name for the dataset import job. We recommend including the current timestamp in the name, for example,
* <code>20190721DatasetImport</code>. This can help you avoid getting a
* <code>ResourceAlreadyExistsException</code> exception.
*
*/
private String datasetImportJobName;
/**
*
* The Amazon Resource Name (ARN) of the Amazon Forecast dataset that you want to import data to.
*
*/
private String datasetArn;
/**
*
* The location of the training data to import and an Identity and Access Management (IAM) role that Amazon Forecast
* can assume to access the data. The training data must be stored in an Amazon S3 bucket.
*
*
* If encryption is used, <code>DataSource</code> must include a Key Management Service (KMS) key and the IAM role
* must allow Amazon Forecast permission to access the key. The KMS key and IAM role must match those specified in
* the <code>EncryptionConfig</code> parameter of the CreateDataset operation.
*
*/
private DataSource dataSource;
/**
*
* The format of timestamps in the dataset. The format that you specify depends on the <code>DataFrequency</code>
* specified when the dataset was created. The following formats are supported:
*
* - "yyyy-MM-dd", for the following data frequencies: Y, M, W, and D
*
* - "yyyy-MM-dd HH:mm:ss", for the following data frequencies: H, 30min, 15min, and 1min; and optionally, for:
* Y, M, W, and D
*
* If the format isn't specified, Amazon Forecast expects the format to be "yyyy-MM-dd HH:mm:ss".
*
*/
private String timestampFormat;
/**
*
* A single time zone for every item in your dataset. This option is ideal for datasets with all timestamps within a
* single time zone, or if all timestamps are normalized to a single time zone.
*
*
* Refer to the Joda-Time API for a complete list of
* valid time zone names.
*
*/
private String timeZone;
/**
*
* Automatically derive time zone information from the geolocation attribute. This option is ideal for datasets that
* contain timestamps in multiple time zones and those timestamps are expressed in local time.
*
*/
private Boolean useGeolocationForTimeZone;
/**
*
* The format of the geolocation attribute. The geolocation attribute can be formatted in one of two ways:
*
* - <code>LAT_LONG</code> - the latitude and longitude in decimal format (Example: 47.61_-122.33).
*
* - <code>CC_POSTALCODE</code> (US Only) - the country code (US), followed by the 5-digit ZIP code (Example:
* US_98121).
*/
private String geolocationFormat;
/**
*
* The optional metadata that you apply to the dataset import job to help you categorize and organize it. Each tag
* consists of a key and an optional value, both of which you define.
*
*
* The following basic restrictions apply to tags:
*
* - Maximum number of tags per resource: 50.
*
* - For each resource, each tag key must be unique, and each tag key can have only one value.
*
* - Maximum key length: 128 Unicode characters in UTF-8.
*
* - Maximum value length: 256 Unicode characters in UTF-8.
*
* - If your tagging schema is used across multiple services and resources, remember that other services may have
* restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable
* in UTF-8, and the following characters: + - = . _ : / @.
*
* - Tag keys and values are case sensitive.
*
* - Do not use <code>aws:</code>, <code>AWS:</code>, or any upper or lowercase combination of these as a prefix for
* keys, as it is reserved for Amazon Web Services use. You cannot edit or delete tag keys with this prefix. Values
* can have this prefix. If a tag value has <code>aws</code> as its prefix but the key does not, then Forecast
* considers it to be a user tag and it will count against the limit of 50 tags. Tags with only the key prefix of
* <code>aws</code> do not count against your tags per resource limit.
*
*/
private java.util.List<Tag> tags;
/**
*
* The format of the imported data, CSV or PARQUET. The default value is CSV.
*
*/
private String format;
/**
*
* Specifies whether the dataset import job is a <code>FULL</code> or <code>INCREMENTAL</code> import. A
* <code>FULL</code> dataset import replaces all of the existing data with the newly imported data. An
* <code>INCREMENTAL</code> import appends the imported data to the existing data.
*
*/
private String importMode;
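/*
 * A minimal usage sketch (illustrative only, not part of the generated class). It assumes a
 * client built elsewhere, e.g. with AmazonForecastClientBuilder; the dataset ARN, S3 path, and
 * role ARN below are placeholders.
 *
 *   CreateDatasetImportJobRequest request = new CreateDatasetImportJobRequest()
 *           .withDatasetImportJobName("20190721DatasetImport")
 *           .withDatasetArn("arn:aws:forecast:us-west-2:123456789012:dataset/electricity_demand")
 *           .withDataSource(new DataSource()
 *                   .withS3Config(new S3Config()
 *                           .withPath("s3://amzn-s3-demo-bucket/electricity.csv")
 *                           .withRoleArn("arn:aws:iam::123456789012:role/ForecastRole")))
 *           .withTimestampFormat("yyyy-MM-dd HH:mm:ss");
 *   forecastClient.createDatasetImportJob(request);
 */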
/**
*
* The name for the dataset import job. We recommend including the current timestamp in the name, for example,
* <code>20190721DatasetImport</code>. This can help you avoid getting a
* <code>ResourceAlreadyExistsException</code> exception.
*
*
* @param datasetImportJobName
* The name for the dataset import job. We recommend including the current timestamp in the name, for
* example, <code>20190721DatasetImport</code>. This can help you avoid getting a
* <code>ResourceAlreadyExistsException</code> exception.
*/
public void setDatasetImportJobName(String datasetImportJobName) {
this.datasetImportJobName = datasetImportJobName;
}
/**
*
* The name for the dataset import job. We recommend including the current timestamp in the name, for example,
* <code>20190721DatasetImport</code>. This can help you avoid getting a
* <code>ResourceAlreadyExistsException</code> exception.
*
*
* @return The name for the dataset import job. We recommend including the current timestamp in the name, for
* example, <code>20190721DatasetImport</code>. This can help you avoid getting a
* <code>ResourceAlreadyExistsException</code> exception.
*/
public String getDatasetImportJobName() {
return this.datasetImportJobName;
}
/**
*
* The name for the dataset import job. We recommend including the current timestamp in the name, for example,
* <code>20190721DatasetImport</code>. This can help you avoid getting a
* <code>ResourceAlreadyExistsException</code> exception.
*
*
* @param datasetImportJobName
* The name for the dataset import job. We recommend including the current timestamp in the name, for
* example, <code>20190721DatasetImport</code>. This can help you avoid getting a
* <code>ResourceAlreadyExistsException</code> exception.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public CreateDatasetImportJobRequest withDatasetImportJobName(String datasetImportJobName) {
setDatasetImportJobName(datasetImportJobName);
return this;
}
/**
*
* The Amazon Resource Name (ARN) of the Amazon Forecast dataset that you want to import data to.
*
*
* @param datasetArn
* The Amazon Resource Name (ARN) of the Amazon Forecast dataset that you want to import data to.
*/
public void setDatasetArn(String datasetArn) {
this.datasetArn = datasetArn;
}
/**
*
* The Amazon Resource Name (ARN) of the Amazon Forecast dataset that you want to import data to.
*
*
* @return The Amazon Resource Name (ARN) of the Amazon Forecast dataset that you want to import data to.
*/
public String getDatasetArn() {
return this.datasetArn;
}
/**
*
* The Amazon Resource Name (ARN) of the Amazon Forecast dataset that you want to import data to.
*
*
* @param datasetArn
* The Amazon Resource Name (ARN) of the Amazon Forecast dataset that you want to import data to.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public CreateDatasetImportJobRequest withDatasetArn(String datasetArn) {
setDatasetArn(datasetArn);
return this;
}
/**
*
* The location of the training data to import and an Identity and Access Management (IAM) role that Amazon Forecast
* can assume to access the data. The training data must be stored in an Amazon S3 bucket.
*
*
* If encryption is used, <code>DataSource</code> must include a Key Management Service (KMS) key and the IAM role
* must allow Amazon Forecast permission to access the key. The KMS key and IAM role must match those specified in
* the <code>EncryptionConfig</code> parameter of the CreateDataset operation.
*
*
* @param dataSource
* The location of the training data to import and an Identity and Access Management (IAM) role that Amazon
* Forecast can assume to access the data. The training data must be stored in an Amazon S3 bucket.
*
* If encryption is used, <code>DataSource</code> must include a Key Management Service (KMS) key and the
* IAM role must allow Amazon Forecast permission to access the key. The KMS key and IAM role must match
* those specified in the <code>EncryptionConfig</code> parameter of the CreateDataset operation.
*/
public void setDataSource(DataSource dataSource) {
this.dataSource = dataSource;
}
/**
*
* The location of the training data to import and an Identity and Access Management (IAM) role that Amazon Forecast
* can assume to access the data. The training data must be stored in an Amazon S3 bucket.
*
*
* If encryption is used, <code>DataSource</code> must include a Key Management Service (KMS) key and the IAM role
* must allow Amazon Forecast permission to access the key. The KMS key and IAM role must match those specified in
* the <code>EncryptionConfig</code> parameter of the CreateDataset operation.
*
*
* @return The location of the training data to import and an Identity and Access Management (IAM) role that Amazon
* Forecast can assume to access the data. The training data must be stored in an Amazon S3 bucket.
*
* If encryption is used, <code>DataSource</code> must include a Key Management Service (KMS) key and the
* IAM role must allow Amazon Forecast permission to access the key. The KMS key and IAM role must match
* those specified in the <code>EncryptionConfig</code> parameter of the CreateDataset operation.
*/
public DataSource getDataSource() {
return this.dataSource;
}
/**
*
* The location of the training data to import and an Identity and Access Management (IAM) role that Amazon Forecast
* can assume to access the data. The training data must be stored in an Amazon S3 bucket.
*
*
* If encryption is used, <code>DataSource</code> must include a Key Management Service (KMS) key and the IAM role
* must allow Amazon Forecast permission to access the key. The KMS key and IAM role must match those specified in
* the <code>EncryptionConfig</code> parameter of the CreateDataset operation.
*
*
* @param dataSource
* The location of the training data to import and an Identity and Access Management (IAM) role that Amazon
* Forecast can assume to access the data. The training data must be stored in an Amazon S3 bucket.
*
* If encryption is used, <code>DataSource</code> must include a Key Management Service (KMS) key and the
* IAM role must allow Amazon Forecast permission to access the key. The KMS key and IAM role must match
* those specified in the <code>EncryptionConfig</code> parameter of the CreateDataset operation.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public CreateDatasetImportJobRequest withDataSource(DataSource dataSource) {
setDataSource(dataSource);
return this;
}
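/*
 * A sketch of a DataSource for an encrypted import (illustrative; the ARNs are placeholders and
 * assume the S3Config shape from this module). The KMS key and IAM role must match the
 * EncryptionConfig specified in the CreateDataset call.
 *
 *   DataSource encryptedSource = new DataSource()
 *           .withS3Config(new S3Config()
 *                   .withPath("s3://amzn-s3-demo-bucket/train/")
 *                   .withRoleArn("arn:aws:iam::123456789012:role/ForecastRole")
 *                   .withKMSKeyArn("arn:aws:kms:us-west-2:123456789012:key/example-key-id"));
 */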
/**
*
* The format of timestamps in the dataset. The format that you specify depends on the <code>DataFrequency</code>
* specified when the dataset was created. The following formats are supported:
*
* - "yyyy-MM-dd", for the following data frequencies: Y, M, W, and D
*
* - "yyyy-MM-dd HH:mm:ss", for the following data frequencies: H, 30min, 15min, and 1min; and optionally, for:
* Y, M, W, and D
*
* If the format isn't specified, Amazon Forecast expects the format to be "yyyy-MM-dd HH:mm:ss".
*
*
* @param timestampFormat
* The format of timestamps in the dataset. The format that you specify depends on the
* <code>DataFrequency</code> specified when the dataset was created. The following formats are supported:
*
* - "yyyy-MM-dd", for the following data frequencies: Y, M, W, and D
*
* - "yyyy-MM-dd HH:mm:ss", for the following data frequencies: H, 30min, 15min, and 1min; and optionally,
* for: Y, M, W, and D
*
* If the format isn't specified, Amazon Forecast expects the format to be "yyyy-MM-dd HH:mm:ss".
*/
public void setTimestampFormat(String timestampFormat) {
this.timestampFormat = timestampFormat;
}
/**
*
* The format of timestamps in the dataset. The format that you specify depends on the <code>DataFrequency</code>
* specified when the dataset was created. The following formats are supported:
*
* - "yyyy-MM-dd", for the following data frequencies: Y, M, W, and D
*
* - "yyyy-MM-dd HH:mm:ss", for the following data frequencies: H, 30min, 15min, and 1min; and optionally, for:
* Y, M, W, and D
*
* If the format isn't specified, Amazon Forecast expects the format to be "yyyy-MM-dd HH:mm:ss".
*
*
* @return The format of timestamps in the dataset. The format that you specify depends on the
* <code>DataFrequency</code> specified when the dataset was created. The following formats are supported:
*
* - "yyyy-MM-dd", for the following data frequencies: Y, M, W, and D
*
* - "yyyy-MM-dd HH:mm:ss", for the following data frequencies: H, 30min, 15min, and 1min; and optionally,
* for: Y, M, W, and D
*
* If the format isn't specified, Amazon Forecast expects the format to be "yyyy-MM-dd HH:mm:ss".
*/
public String getTimestampFormat() {
return this.timestampFormat;
}
/**
*
* The format of timestamps in the dataset. The format that you specify depends on the <code>DataFrequency</code>
* specified when the dataset was created. The following formats are supported:
*
* - "yyyy-MM-dd", for the following data frequencies: Y, M, W, and D
*
* - "yyyy-MM-dd HH:mm:ss", for the following data frequencies: H, 30min, 15min, and 1min; and optionally, for:
* Y, M, W, and D
*
* If the format isn't specified, Amazon Forecast expects the format to be "yyyy-MM-dd HH:mm:ss".
*
*
* @param timestampFormat
* The format of timestamps in the dataset. The format that you specify depends on the
* <code>DataFrequency</code> specified when the dataset was created. The following formats are supported:
*
* - "yyyy-MM-dd", for the following data frequencies: Y, M, W, and D
*
* - "yyyy-MM-dd HH:mm:ss", for the following data frequencies: H, 30min, 15min, and 1min; and optionally,
* for: Y, M, W, and D
*
* If the format isn't specified, Amazon Forecast expects the format to be "yyyy-MM-dd HH:mm:ss".
* @return Returns a reference to this object so that method calls can be chained together.
*/
public CreateDatasetImportJobRequest withTimestampFormat(String timestampFormat) {
setTimestampFormat(timestampFormat);
return this;
}
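/*
 * For example (illustrative): a dataset created with a daily DataFrequency can use the date-only
 * format, while sub-daily data requires the full timestamp format.
 *
 *   request.withTimestampFormat("yyyy-MM-dd");          // daily (D) data
 *   request.withTimestampFormat("yyyy-MM-dd HH:mm:ss"); // hourly (H) data
 */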
/**
*
* A single time zone for every item in your dataset. This option is ideal for datasets with all timestamps within a
* single time zone, or if all timestamps are normalized to a single time zone.
*
*
* Refer to the Joda-Time API for a complete list of
* valid time zone names.
*
*
* @param timeZone
* A single time zone for every item in your dataset. This option is ideal for datasets with all timestamps
* within a single time zone, or if all timestamps are normalized to a single time zone.
*
* Refer to the Joda-Time API for a complete
* list of valid time zone names.
*/
public void setTimeZone(String timeZone) {
this.timeZone = timeZone;
}
/**
*
* A single time zone for every item in your dataset. This option is ideal for datasets with all timestamps within a
* single time zone, or if all timestamps are normalized to a single time zone.
*
*
* Refer to the Joda-Time API for a complete list of
* valid time zone names.
*
*
* @return A single time zone for every item in your dataset. This option is ideal for datasets with all timestamps
* within a single time zone, or if all timestamps are normalized to a single time zone.
*
* Refer to the Joda-Time API for a complete
* list of valid time zone names.
*/
public String getTimeZone() {
return this.timeZone;
}
/**
*
* A single time zone for every item in your dataset. This option is ideal for datasets with all timestamps within a
* single time zone, or if all timestamps are normalized to a single time zone.
*
*
* Refer to the Joda-Time API for a complete list of
* valid time zone names.
*
*
* @param timeZone
* A single time zone for every item in your dataset. This option is ideal for datasets with all timestamps
* within a single time zone, or if all timestamps are normalized to a single time zone.
*
* Refer to the Joda-Time API for a complete
* list of valid time zone names.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public CreateDatasetImportJobRequest withTimeZone(String timeZone) {
setTimeZone(timeZone);
return this;
}
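/*
 * For example (illustrative), pinning every timestamp in the dataset to one Joda-Time zone name:
 *
 *   request.withTimeZone("America/Los_Angeles");
 */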
/**
*
* Automatically derive time zone information from the geolocation attribute. This option is ideal for datasets that
* contain timestamps in multiple time zones and those timestamps are expressed in local time.
*
*
* @param useGeolocationForTimeZone
* Automatically derive time zone information from the geolocation attribute. This option is ideal for
* datasets that contain timestamps in multiple time zones and those timestamps are expressed in local time.
*/
public void setUseGeolocationForTimeZone(Boolean useGeolocationForTimeZone) {
this.useGeolocationForTimeZone = useGeolocationForTimeZone;
}
/**
*
* Automatically derive time zone information from the geolocation attribute. This option is ideal for datasets that
* contain timestamps in multiple time zones and those timestamps are expressed in local time.
*
*
* @return Automatically derive time zone information from the geolocation attribute. This option is ideal for
* datasets that contain timestamps in multiple time zones and those timestamps are expressed in local time.
*/
public Boolean getUseGeolocationForTimeZone() {
return this.useGeolocationForTimeZone;
}
/**
*
* Automatically derive time zone information from the geolocation attribute. This option is ideal for datasets that
* contain timestamps in multiple time zones and those timestamps are expressed in local time.
*
*
* @param useGeolocationForTimeZone
* Automatically derive time zone information from the geolocation attribute. This option is ideal for
* datasets that contain timestamps in multiple time zones and those timestamps are expressed in local time.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public CreateDatasetImportJobRequest withUseGeolocationForTimeZone(Boolean useGeolocationForTimeZone) {
setUseGeolocationForTimeZone(useGeolocationForTimeZone);
return this;
}
/**
*
* Automatically derive time zone information from the geolocation attribute. This option is ideal for datasets that
* contain timestamps in multiple time zones and those timestamps are expressed in local time.
*
*
* @return Automatically derive time zone information from the geolocation attribute. This option is ideal for
* datasets that contain timestamps in multiple time zones and those timestamps are expressed in local time.
*/
public Boolean isUseGeolocationForTimeZone() {
return this.useGeolocationForTimeZone;
}
/**
*
* The format of the geolocation attribute. The geolocation attribute can be formatted in one of two ways:
*
* - <code>LAT_LONG</code> - the latitude and longitude in decimal format (Example: 47.61_-122.33).
*
* - <code>CC_POSTALCODE</code> (US Only) - the country code (US), followed by the 5-digit ZIP code (Example:
* US_98121).
*
*
* @param geolocationFormat
* The format of the geolocation attribute. The geolocation attribute can be formatted in one of two ways:
*
* - <code>LAT_LONG</code> - the latitude and longitude in decimal format (Example: 47.61_-122.33).
*
* - <code>CC_POSTALCODE</code> (US Only) - the country code (US), followed by the 5-digit ZIP code (Example:
* US_98121).
*/
public void setGeolocationFormat(String geolocationFormat) {
this.geolocationFormat = geolocationFormat;
}
/**
*
* The format of the geolocation attribute. The geolocation attribute can be formatted in one of two ways:
*
* - <code>LAT_LONG</code> - the latitude and longitude in decimal format (Example: 47.61_-122.33).
*
* - <code>CC_POSTALCODE</code> (US Only) - the country code (US), followed by the 5-digit ZIP code (Example:
* US_98121).
*
*
* @return The format of the geolocation attribute. The geolocation attribute can be formatted in one of two ways:
*
* - <code>LAT_LONG</code> - the latitude and longitude in decimal format (Example: 47.61_-122.33).
*
* - <code>CC_POSTALCODE</code> (US Only) - the country code (US), followed by the 5-digit ZIP code (Example:
* US_98121).
*/
public String getGeolocationFormat() {
return this.geolocationFormat;
}
/**
*
* The format of the geolocation attribute. The geolocation attribute can be formatted in one of two ways:
*
* - <code>LAT_LONG</code> - the latitude and longitude in decimal format (Example: 47.61_-122.33).
*
* - <code>CC_POSTALCODE</code> (US Only) - the country code (US), followed by the 5-digit ZIP code (Example:
* US_98121).
*
*
* @param geolocationFormat
* The format of the geolocation attribute. The geolocation attribute can be formatted in one of two ways:
*
* - <code>LAT_LONG</code> - the latitude and longitude in decimal format (Example: 47.61_-122.33).
*
* - <code>CC_POSTALCODE</code> (US Only) - the country code (US), followed by the 5-digit ZIP code (Example:
* US_98121).
* @return Returns a reference to this object so that method calls can be chained together.
*/
public CreateDatasetImportJobRequest withGeolocationFormat(String geolocationFormat) {
setGeolocationFormat(geolocationFormat);
return this;
}
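/*
 * A sketch combining the geolocation options (illustrative): declare the attribute format and,
 * because the timestamps are expressed in local time, derive each item's time zone from its
 * geolocation rather than setting a single time zone.
 *
 *   request.withGeolocationFormat("LAT_LONG")
 *          .withUseGeolocationForTimeZone(true);
 */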
/**
*
* The optional metadata that you apply to the dataset import job to help you categorize and organize it. Each tag
* consists of a key and an optional value, both of which you define.
*
*
* The following basic restrictions apply to tags:
*
* - Maximum number of tags per resource: 50.
*
* - For each resource, each tag key must be unique, and each tag key can have only one value.
*
* - Maximum key length: 128 Unicode characters in UTF-8.
*
* - Maximum value length: 256 Unicode characters in UTF-8.
*
* - If your tagging schema is used across multiple services and resources, remember that other services may have
* restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable
* in UTF-8, and the following characters: + - = . _ : / @.
*
* - Tag keys and values are case sensitive.
*
* - Do not use <code>aws:</code>, <code>AWS:</code>, or any upper or lowercase combination of these as a prefix for
* keys, as it is reserved for Amazon Web Services use. You cannot edit or delete tag keys with this prefix. Values
* can have this prefix. If a tag value has <code>aws</code> as its prefix but the key does not, then Forecast
* considers it to be a user tag and it will count against the limit of 50 tags. Tags with only the key prefix of
* <code>aws</code> do not count against your tags per resource limit.
*
*
* @return The optional metadata that you apply to the dataset import job to help you categorize and organize it.
* Each tag consists of a key and an optional value, both of which you define.
*
* The following basic restrictions apply to tags:
*
* - Maximum number of tags per resource: 50.
*
* - For each resource, each tag key must be unique, and each tag key can have only one value.
*
* - Maximum key length: 128 Unicode characters in UTF-8.
*
* - Maximum value length: 256 Unicode characters in UTF-8.
*
* - If your tagging schema is used across multiple services and resources, remember that other services may
* have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces
* representable in UTF-8, and the following characters: + - = . _ : / @.
*
* - Tag keys and values are case sensitive.
*
* - Do not use <code>aws:</code>, <code>AWS:</code>, or any upper or lowercase combination of these as a
* prefix for keys, as it is reserved for Amazon Web Services use. You cannot edit or delete tag keys with
* this prefix. Values can have this prefix. If a tag value has <code>aws</code> as its prefix but the key
* does not, then Forecast considers it to be a user tag and it will count against the limit of 50 tags. Tags
* with only the key prefix of <code>aws</code> do not count against your tags per resource limit.
*
*/
public java.util.List<Tag> getTags() {
return tags;
}
/**
*
* The optional metadata that you apply to the dataset import job to help you categorize and organize it. Each tag
* consists of a key and an optional value, both of which you define.
*
*
* The following basic restrictions apply to tags:
*
* - Maximum number of tags per resource: 50.
*
* - For each resource, each tag key must be unique, and each tag key can have only one value.
*
* - Maximum key length: 128 Unicode characters in UTF-8.
*
* - Maximum value length: 256 Unicode characters in UTF-8.
*
* - If your tagging schema is used across multiple services and resources, remember that other services may have
* restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable
* in UTF-8, and the following characters: + - = . _ : / @.
*
* - Tag keys and values are case sensitive.
*
* - Do not use <code>aws:</code>, <code>AWS:</code>, or any upper or lowercase combination of these as a prefix for
* keys, as it is reserved for Amazon Web Services use. You cannot edit or delete tag keys with this prefix. Values
* can have this prefix. If a tag value has <code>aws</code> as its prefix but the key does not, then Forecast
* considers it to be a user tag and it will count against the limit of 50 tags. Tags with only the key prefix of
* <code>aws</code> do not count against your tags per resource limit.
*
*
* @param tags
* The optional metadata that you apply to the dataset import job to help you categorize and organize it.
* Each tag consists of a key and an optional value, both of which you define.
*
* The following basic restrictions apply to tags:
*
* - Maximum number of tags per resource: 50.
*
* - For each resource, each tag key must be unique, and each tag key can have only one value.
*
* - Maximum key length: 128 Unicode characters in UTF-8.
*
* - Maximum value length: 256 Unicode characters in UTF-8.
*
* - If your tagging schema is used across multiple services and resources, remember that other services may
* have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces
* representable in UTF-8, and the following characters: + - = . _ : / @.
*
* - Tag keys and values are case sensitive.
*
* - Do not use <code>aws:</code>, <code>AWS:</code>, or any upper or lowercase combination of these as a
* prefix for keys, as it is reserved for Amazon Web Services use. You cannot edit or delete tag keys with
* this prefix. Values can have this prefix. If a tag value has <code>aws</code> as its prefix but the key
* does not, then Forecast considers it to be a user tag and it will count against the limit of 50 tags. Tags
* with only the key prefix of <code>aws</code> do not count against your tags per resource limit.
*
*/
public void setTags(java.util.Collection<Tag> tags) {
if (tags == null) {
this.tags = null;
return;
}
this.tags = new java.util.ArrayList<Tag>(tags);
}
/**
*
* The optional metadata that you apply to the dataset import job to help you categorize and organize it. Each tag
* consists of a key and an optional value, both of which you define.
*
*
* The following basic restrictions apply to tags:
*
* - Maximum number of tags per resource: 50.
*
* - For each resource, each tag key must be unique, and each tag key can have only one value.
*
* - Maximum key length: 128 Unicode characters in UTF-8.
*
* - Maximum value length: 256 Unicode characters in UTF-8.
*
* - If your tagging schema is used across multiple services and resources, remember that other services may have
* restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable
* in UTF-8, and the following characters: + - = . _ : / @.
*
* - Tag keys and values are case sensitive.
*
* - Do not use <code>aws:</code>, <code>AWS:</code>, or any upper or lowercase combination of these as a prefix for
* keys, as it is reserved for Amazon Web Services use. You cannot edit or delete tag keys with this prefix. Values
* can have this prefix. If a tag value has <code>aws</code> as its prefix but the key does not, then Forecast
* considers it to be a user tag and it will count against the limit of 50 tags. Tags with only the key prefix of
* <code>aws</code> do not count against your tags per resource limit.
*
*
* NOTE: This method appends the values to the existing list (if any). Use
* {@link #setTags(java.util.Collection)} or {@link #withTags(java.util.Collection)} if you want to override the
* existing values.
*
*
* @param tags
* The optional metadata that you apply to the dataset import job to help you categorize and organize it.
* Each tag consists of a key and an optional value, both of which you define.
*
* The following basic restrictions apply to tags:
*
* - Maximum number of tags per resource: 50.
*
* - For each resource, each tag key must be unique, and each tag key can have only one value.
*
* - Maximum key length: 128 Unicode characters in UTF-8.
*
* - Maximum value length: 256 Unicode characters in UTF-8.
*
* - If your tagging schema is used across multiple services and resources, remember that other services may
* have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces
* representable in UTF-8, and the following characters: + - = . _ : / @.
*
* - Tag keys and values are case sensitive.
*
* - Do not use <code>aws:</code>, <code>AWS:</code>, or any upper or lowercase combination of these as a
* prefix for keys, as it is reserved for Amazon Web Services use. You cannot edit or delete tag keys with
* this prefix. Values can have this prefix. If a tag value has <code>aws</code> as its prefix but the key
* does not, then Forecast considers it to be a user tag and it will count against the limit of 50 tags. Tags
* with only the key prefix of <code>aws</code> do not count against your tags per resource limit.
*
*
* @return Returns a reference to this object so that method calls can be chained together.
*/
public CreateDatasetImportJobRequest withTags(Tag... tags) {
if (this.tags == null) {
setTags(new java.util.ArrayList<Tag>(tags.length));
}
for (Tag ele : tags) {
this.tags.add(ele);
}
return this;
}
/**
*
* The optional metadata that you apply to the dataset import job to help you categorize and organize it. Each tag
* consists of a key and an optional value, both of which you define.
*
*
* The following basic restrictions apply to tags:
*
* - Maximum number of tags per resource: 50.
*
* - For each resource, each tag key must be unique, and each tag key can have only one value.
*
* - Maximum key length: 128 Unicode characters in UTF-8.
*
* - Maximum value length: 256 Unicode characters in UTF-8.
*
* - If your tagging schema is used across multiple services and resources, remember that other services may have
* restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable
* in UTF-8, and the following characters: + - = . _ : / @.
*
* - Tag keys and values are case sensitive.
*
* - Do not use <code>aws:</code>, <code>AWS:</code>, or any upper or lowercase combination of these as a prefix for
* keys, as it is reserved for Amazon Web Services use. You cannot edit or delete tag keys with this prefix. Values
* can have this prefix. If a tag value has <code>aws</code> as its prefix but the key does not, then Forecast
* considers it to be a user tag and it will count against the limit of 50 tags. Tags with only the key prefix of
* <code>aws</code> do not count against your tags per resource limit.
*
*
* @param tags
* The optional metadata that you apply to the dataset import job to help you categorize and organize it.
* Each tag consists of a key and an optional value, both of which you define.
*
* The following basic restrictions apply to tags:
*
* - Maximum number of tags per resource: 50.
*
* - For each resource, each tag key must be unique, and each tag key can have only one value.
*
* - Maximum key length: 128 Unicode characters in UTF-8.
*
* - Maximum value length: 256 Unicode characters in UTF-8.
*
* - If your tagging schema is used across multiple services and resources, remember that other services may
* have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces
* representable in UTF-8, and the following characters: + - = . _ : / @.
*
* - Tag keys and values are case sensitive.
*
* - Do not use <code>aws:</code>, <code>AWS:</code>, or any upper or lowercase combination of these as a
* prefix for keys, as it is reserved for Amazon Web Services use. You cannot edit or delete tag keys with
* this prefix. Values can have this prefix. If a tag value has <code>aws</code> as its prefix but the key
* does not, then Forecast considers it to be a user tag and it will count against the limit of 50 tags. Tags
* with only the key prefix of <code>aws</code> do not count against your tags per resource limit.
*
* @return Returns a reference to this object so that method calls can be chained together.
*/
public CreateDatasetImportJobRequest withTags(java.util.Collection<Tag> tags) {
setTags(tags);
return this;
}
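/*
 * A sketch of tagging the import job (the key/value pairs are illustrative):
 *
 *   request.withTags(
 *           new Tag().withKey("project").withValue("demand-forecasting"),
 *           new Tag().withKey("environment").withValue("test"));
 *
 * Note that withTags(Tag...) appends to any existing list, while setTags and
 * withTags(java.util.Collection) replace it.
 */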
/**
*
* The format of the imported data, CSV or PARQUET. The default value is CSV.
*
*
* @param format
* The format of the imported data, CSV or PARQUET. The default value is CSV.
*/
public void setFormat(String format) {
this.format = format;
}
/**
*
* The format of the imported data, CSV or PARQUET. The default value is CSV.
*
*
* @return The format of the imported data, CSV or PARQUET. The default value is CSV.
*/
public String getFormat() {
return this.format;
}
/**
*
* The format of the imported data, CSV or PARQUET. The default value is CSV.
*
*
* @param format
* The format of the imported data, CSV or PARQUET. The default value is CSV.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public CreateDatasetImportJobRequest withFormat(String format) {
setFormat(format);
return this;
}
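/*
 * For example (illustrative), importing Parquet files instead of the default CSV:
 *
 *   request.withFormat("PARQUET");
 */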
/**
*
* Specifies whether the dataset import job is a <code>FULL</code> or <code>INCREMENTAL</code> import. A
* <code>FULL</code> dataset import replaces all of the existing data with the newly imported data. An
* <code>INCREMENTAL</code> import appends the imported data to the existing data.
*
*
* @param importMode
* Specifies whether the dataset import job is a <code>FULL</code> or <code>INCREMENTAL</code> import. A
* <code>FULL</code> dataset import replaces all of the existing data with the newly imported data. An
* <code>INCREMENTAL</code> import appends the imported data to the existing data.
* @see ImportMode
*/
public void setImportMode(String importMode) {
this.importMode = importMode;
}
/**
*
* Specifies whether the dataset import job is a <code>FULL</code> or <code>INCREMENTAL</code> import. A
* <code>FULL</code> dataset import replaces all of the existing data with the newly imported data. An
* <code>INCREMENTAL</code> import appends the imported data to the existing data.
*
*
* @return Specifies whether the dataset import job is a <code>FULL</code> or <code>INCREMENTAL</code> import. A
* <code>FULL</code> dataset import replaces all of the existing data with the newly imported data. An
* <code>INCREMENTAL</code> import appends the imported data to the existing data.
* @see ImportMode
*/
public String getImportMode() {
return this.importMode;
}
/**
*
* Specifies whether the dataset import job is a <code>FULL</code> or <code>INCREMENTAL</code> import. A
* <code>FULL</code> dataset import replaces all of the existing data with the newly imported data. An
* <code>INCREMENTAL</code> import appends the imported data to the existing data.
*
*
* @param importMode
* Specifies whether the dataset import job is a <code>FULL</code> or <code>INCREMENTAL</code> import. A
* <code>FULL</code> dataset import replaces all of the existing data with the newly imported data. An
* <code>INCREMENTAL</code> import appends the imported data to the existing data.
* @return Returns a reference to this object so that method calls can be chained together.
* @see ImportMode
*/
public CreateDatasetImportJobRequest withImportMode(String importMode) {
setImportMode(importMode);
return this;
}
/**
*
* Specifies whether the dataset import job is a <code>FULL</code> or <code>INCREMENTAL</code> import. A
* <code>FULL</code> dataset import replaces all of the existing data with the newly imported data. An
* <code>INCREMENTAL</code> import appends the imported data to the existing data.
*
*
* @param importMode
* Specifies whether the dataset import job is a <code>FULL</code> or <code>INCREMENTAL</code> import. A
* <code>FULL</code> dataset import replaces all of the existing data with the newly imported data. An
* <code>INCREMENTAL</code> import appends the imported data to the existing data.
* @return Returns a reference to this object so that method calls can be chained together.
* @see ImportMode
*/
public CreateDatasetImportJobRequest withImportMode(ImportMode importMode) {
this.importMode = importMode.toString();
return this;
}
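/*
 * A sketch using the ImportMode enum overload (illustrative): append newly imported rows to the
 * existing data instead of replacing it.
 *
 *   request.withImportMode(ImportMode.INCREMENTAL);
 */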
/**
* Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be
* redacted from this string using a placeholder value.
*
* @return A string representation of this object.
*
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("{");
if (getDatasetImportJobName() != null)
sb.append("DatasetImportJobName: ").append(getDatasetImportJobName()).append(",");
if (getDatasetArn() != null)
sb.append("DatasetArn: ").append(getDatasetArn()).append(",");
if (getDataSource() != null)
sb.append("DataSource: ").append(getDataSource()).append(",");
if (getTimestampFormat() != null)
sb.append("TimestampFormat: ").append(getTimestampFormat()).append(",");
if (getTimeZone() != null)
sb.append("TimeZone: ").append(getTimeZone()).append(",");
if (getUseGeolocationForTimeZone() != null)
sb.append("UseGeolocationForTimeZone: ").append(getUseGeolocationForTimeZone()).append(",");
if (getGeolocationFormat() != null)
sb.append("GeolocationFormat: ").append(getGeolocationFormat()).append(",");
if (getTags() != null)
sb.append("Tags: ").append(getTags()).append(",");
if (getFormat() != null)
sb.append("Format: ").append(getFormat()).append(",");
if (getImportMode() != null)
sb.append("ImportMode: ").append(getImportMode());
sb.append("}");
return sb.toString();
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (obj instanceof CreateDatasetImportJobRequest == false)
return false;
CreateDatasetImportJobRequest other = (CreateDatasetImportJobRequest) obj;
if (other.getDatasetImportJobName() == null ^ this.getDatasetImportJobName() == null)
return false;
if (other.getDatasetImportJobName() != null && other.getDatasetImportJobName().equals(this.getDatasetImportJobName()) == false)
return false;
if (other.getDatasetArn() == null ^ this.getDatasetArn() == null)
return false;
if (other.getDatasetArn() != null && other.getDatasetArn().equals(this.getDatasetArn()) == false)
return false;
if (other.getDataSource() == null ^ this.getDataSource() == null)
return false;
if (other.getDataSource() != null && other.getDataSource().equals(this.getDataSource()) == false)
return false;
if (other.getTimestampFormat() == null ^ this.getTimestampFormat() == null)
return false;
if (other.getTimestampFormat() != null && other.getTimestampFormat().equals(this.getTimestampFormat()) == false)
return false;
if (other.getTimeZone() == null ^ this.getTimeZone() == null)
return false;
if (other.getTimeZone() != null && other.getTimeZone().equals(this.getTimeZone()) == false)
return false;
if (other.getUseGeolocationForTimeZone() == null ^ this.getUseGeolocationForTimeZone() == null)
return false;
if (other.getUseGeolocationForTimeZone() != null && other.getUseGeolocationForTimeZone().equals(this.getUseGeolocationForTimeZone()) == false)
return false;
if (other.getGeolocationFormat() == null ^ this.getGeolocationFormat() == null)
return false;
if (other.getGeolocationFormat() != null && other.getGeolocationFormat().equals(this.getGeolocationFormat()) == false)
return false;
if (other.getTags() == null ^ this.getTags() == null)
return false;
if (other.getTags() != null && other.getTags().equals(this.getTags()) == false)
return false;
if (other.getFormat() == null ^ this.getFormat() == null)
return false;
if (other.getFormat() != null && other.getFormat().equals(this.getFormat()) == false)
return false;
if (other.getImportMode() == null ^ this.getImportMode() == null)
return false;
if (other.getImportMode() != null && other.getImportMode().equals(this.getImportMode()) == false)
return false;
return true;
}
@Override
public int hashCode() {
final int prime = 31;
int hashCode = 1;
hashCode = prime * hashCode + ((getDatasetImportJobName() == null) ? 0 : getDatasetImportJobName().hashCode());
hashCode = prime * hashCode + ((getDatasetArn() == null) ? 0 : getDatasetArn().hashCode());
hashCode = prime * hashCode + ((getDataSource() == null) ? 0 : getDataSource().hashCode());
hashCode = prime * hashCode + ((getTimestampFormat() == null) ? 0 : getTimestampFormat().hashCode());
hashCode = prime * hashCode + ((getTimeZone() == null) ? 0 : getTimeZone().hashCode());
hashCode = prime * hashCode + ((getUseGeolocationForTimeZone() == null) ? 0 : getUseGeolocationForTimeZone().hashCode());
hashCode = prime * hashCode + ((getGeolocationFormat() == null) ? 0 : getGeolocationFormat().hashCode());
hashCode = prime * hashCode + ((getTags() == null) ? 0 : getTags().hashCode());
hashCode = prime * hashCode + ((getFormat() == null) ? 0 : getFormat().hashCode());
hashCode = prime * hashCode + ((getImportMode() == null) ? 0 : getImportMode().hashCode());
return hashCode;
}
@Override
public CreateDatasetImportJobRequest clone() {
return (CreateDatasetImportJobRequest) super.clone();
}
}