/*
* Copyright 2019-2024 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.databasemigrationservice.model;
import java.io.Serializable;
import javax.annotation.Generated;
import com.amazonaws.protocol.StructuredPojo;
import com.amazonaws.protocol.ProtocolMarshaller;
/**
*
* Settings for exporting data to Amazon S3.
*
*
* @see AWS API Documentation
*/
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class S3Settings implements Serializable, Cloneable, StructuredPojo {
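/*
 * Illustrative sketch (not part of the generated model): one typical way to build an S3Settings
 * object for an S3 target endpoint using the fluent with* methods defined below. The role ARN,
 * bucket name, and folder are placeholder values; the compression and data format strings are the
 * values documented on the corresponding fields.
 *
 *   S3Settings s3Settings = new S3Settings()
 *           .withServiceAccessRoleArn("arn:aws:iam::111122223333:role/my-dms-s3-role") // placeholder ARN
 *           .withBucketName("my-dms-target-bucket")                                    // placeholder bucket
 *           .withBucketFolder("dms-output")
 *           .withCompressionType("GZIP")      // "NONE" (default) or "GZIP"
 *           .withDataFormat("csv")            // "csv" (default) or "parquet"
 *           .withCsvDelimiter(",")
 *           .withCsvRowDelimiter("\n");
 */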
/**
*
* The Amazon Resource Name (ARN) used by the service to access the IAM role. The role must allow the
* <code>iam:PassRole</code> action. It is a required parameter that enables DMS to write and read objects from an
* S3 bucket.
*
*/
private String serviceAccessRoleArn;
/**
*
* Specifies how tables are defined in the S3 source files only.
*
*/
private String externalTableDefinition;
/**
*
* The delimiter used to separate rows in the .csv file for both source and target. The default is a carriage return
* (<code>\n</code>).
*
*/
private String csvRowDelimiter;
/**
*
* The delimiter used to separate columns in the .csv file for both source and target. The default is a comma.
*
*/
private String csvDelimiter;
/**
*
* An optional parameter to set a folder name in the S3 bucket. If provided, tables are created in the path
* <code>bucketFolder/schema_name/table_name/</code>. If this parameter isn't specified, then
* the path used is <code>schema_name/table_name/</code>.
*
*/
private String bucketFolder;
/**
*
* The name of the S3 bucket.
*
*/
private String bucketName;
/**
*
* An optional parameter to use GZIP to compress the target files. Set to GZIP to compress the target files. Either
* set this parameter to NONE (the default) or don't use it to leave the files uncompressed. This parameter applies
* to both .csv and .parquet file formats.
*
*/
private String compressionType;
/**
*
* The type of server-side encryption that you want to use for your data. This encryption type is part of the
* endpoint settings or the extra connections attributes for Amazon S3. You can choose either <code>SSE_S3</code>
* (the default) or <code>SSE_KMS</code>.
*
* For the <code>ModifyEndpoint</code> operation, you can change the existing value of the
* <code>EncryptionMode</code> parameter from <code>SSE_KMS</code> to <code>SSE_S3</code>. But you can't change the
* existing value from <code>SSE_S3</code> to <code>SSE_KMS</code>.
*
* To use <code>SSE_S3</code>, you need an Identity and Access Management (IAM) role with permission to allow
* <code>"arn:aws:s3:::dms-*"</code> to use the following actions:
*
* - <code>s3:CreateBucket</code>
* - <code>s3:ListBucket</code>
* - <code>s3:DeleteBucket</code>
* - <code>s3:GetBucketLocation</code>
* - <code>s3:GetObject</code>
* - <code>s3:PutObject</code>
* - <code>s3:DeleteObject</code>
* - <code>s3:GetObjectVersion</code>
* - <code>s3:GetBucketPolicy</code>
* - <code>s3:PutBucketPolicy</code>
* - <code>s3:DeleteBucketPolicy</code>
*
*/
private String encryptionMode;
/**
*
* If you are using <code>SSE_KMS</code> for the <code>EncryptionMode</code>, provide the KMS key ID. The key that
* you use needs an attached policy that enables Identity and Access Management (IAM) user permissions and allows
* use of the key.
*
*
* Here is a CLI example:
* aws dms create-endpoint --endpoint-identifier value --endpoint-type target --engine-name s3 --s3-settings ServiceAccessRoleArn=value,BucketFolder=value,BucketName=value,EncryptionMode=SSE_KMS,ServerSideEncryptionKmsKeyId=value
*
*/
private String serverSideEncryptionKmsKeyId;
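/*
 * Illustrative sketch of the SSE_KMS configuration that the CLI example above describes, expressed
 * with this class's with* methods. The role ARN, bucket name, and KMS key identifier are placeholders.
 *
 *   S3Settings kmsEncrypted = new S3Settings()
 *           .withServiceAccessRoleArn("arn:aws:iam::111122223333:role/my-dms-s3-role")              // placeholder
 *           .withBucketName("my-dms-target-bucket")                                                 // placeholder
 *           .withEncryptionMode("SSE_KMS")
 *           .withServerSideEncryptionKmsKeyId("arn:aws:kms:us-east-1:111122223333:key/example-id"); // placeholder
 */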
/**
*
* The format of the data that you want to use for output. You can choose one of the following:
*
*
* - <code>csv</code>: This is a row-based file format with comma-separated values (.csv).
*
* - <code>parquet</code>: Apache Parquet (.parquet) is a columnar storage file format that features efficient
*   compression and provides faster query response.
*
*/
private String dataFormat;
/**
*
* The type of encoding you are using:
*
*
* - <code>RLE_DICTIONARY</code> uses a combination of bit-packing and run-length encoding to store repeated values
*   more efficiently. This is the default.
*
* - <code>PLAIN</code> doesn't use encoding at all. Values are stored as they are.
*
* - <code>PLAIN_DICTIONARY</code> builds a dictionary of the values encountered in a given column. The dictionary is
*   stored in a dictionary page for each column chunk.
*
*/
private String encodingType;
/**
*
* The maximum size of an encoded dictionary page of a column. If the dictionary page exceeds this, this column is
* stored using an encoding type of <code>PLAIN</code>. This parameter defaults to 1024 * 1024 bytes (1 MiB), the
* maximum size of a dictionary page before it reverts to <code>PLAIN</code> encoding. This size is used for
* .parquet file format only.
*
*/
private Integer dictPageSizeLimit;
/**
*
* The number of rows in a row group. A smaller row group size provides faster reads. But as the number of row
* groups grows, writes become slower. This parameter defaults to 10,000 rows. This number is used for .parquet
* file format only.
*
* If you choose a value larger than the maximum, <code>RowGroupLength</code> is set to the max row group length in
* bytes (64 * 1024 * 1024).
*
*/
private Integer rowGroupLength;
/**
*
* The size of one data page in bytes. This parameter defaults to 1024 * 1024 bytes (1 MiB). This number is used for
* .parquet file format only.
*
*/
private Integer dataPageSize;
/**
*
* The version of the Apache Parquet format that you want to use: <code>parquet_1_0</code> (the default) or
* <code>parquet_2_0</code>.
*
*/
private String parquetVersion;
/**
*
* A value that enables statistics for Parquet pages and row groups. Choose <code>true</code> to enable statistics,
* <code>false</code> to disable. Statistics include <code>NULL</code>, <code>DISTINCT</code>, <code>MAX</code>, and
* <code>MIN</code> values. This parameter defaults to <code>true</code>. This value is used for .parquet file
* format only.
*
*/
private Boolean enableStatistics;
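/*
 * Illustrative sketch of Parquet output tuning using the fields above. The with* methods for these
 * fields follow the same generated pattern as the rest of this class; this is an assumption about the
 * full generated API, and the numeric values shown are simply the documented defaults, used as
 * placeholders rather than recommendations.
 *
 *   S3Settings parquetSettings = new S3Settings()
 *           .withDataFormat("parquet")
 *           .withDictPageSizeLimit(1024 * 1024)   // 1 MiB dictionary page limit (documented default)
 *           .withRowGroupLength(10000)            // rows per row group (documented default)
 *           .withDataPageSize(1024 * 1024)        // 1 MiB data page size (documented default)
 *           .withEnableStatistics(true);          // page/row-group statistics (documented default)
 */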
/**
*
* A value that enables a full load to write INSERT operations to the comma-separated value (.csv) or .parquet
* output files only to indicate how the rows were added to the source database.
*
*
*
* DMS supports the <code>IncludeOpForFullLoad</code> parameter in versions 3.1.4 and later.
*
* DMS supports the use of the .parquet files with the <code>IncludeOpForFullLoad</code> parameter in versions 3.4.7
* and later.
*
* For full load, records can only be inserted. By default (the <code>false</code> setting), no information is
* recorded in these output files for a full load to indicate that the rows were inserted at the source database. If
* <code>IncludeOpForFullLoad</code> is set to <code>true</code> or <code>y</code>, the INSERT is recorded as an I
* annotation in the first field of the .csv file. This allows the format of your target records from a full load to
* be consistent with the target records from a CDC load.
*
* This setting works together with the <code>CdcInsertsOnly</code> and the <code>CdcInsertsAndUpdates</code>
* parameters for output to .csv files only. For more information about how these settings work together, see
* Indicating Source DB Operations in Migrated S3 Data in the Database Migration Service User Guide.
*
*
*/
private Boolean includeOpForFullLoad;
/**
*
* A value that enables a change data capture (CDC) load to write only INSERT operations to .csv or columnar storage
* (.parquet) output files. By default (the <code>false</code> setting), the first field in a .csv or .parquet
* record contains the letter I (INSERT), U (UPDATE), or D (DELETE). These values indicate whether the row was
* inserted, updated, or deleted at the source database for a CDC load to the target.
*
* If <code>CdcInsertsOnly</code> is set to <code>true</code> or <code>y</code>, only INSERTs from the source
* database are migrated to the .csv or .parquet file. For .csv format only, how these INSERTs are recorded depends
* on the value of <code>IncludeOpForFullLoad</code>. If <code>IncludeOpForFullLoad</code> is set to
* <code>true</code>, the first field of every CDC record is set to I to indicate the INSERT operation at the
* source. If <code>IncludeOpForFullLoad</code> is set to <code>false</code>, every CDC record is written without a
* first field to indicate the INSERT operation at the source. For more information about how these settings work
* together, see Indicating Source DB Operations in Migrated S3 Data in the Database Migration Service User Guide.
*
* DMS supports the preceding interaction between the <code>CdcInsertsOnly</code> and
* <code>IncludeOpForFullLoad</code> parameters in versions 3.1.4 and later.
*
* <code>CdcInsertsOnly</code> and <code>CdcInsertsAndUpdates</code> can't both be set to <code>true</code> for the
* same endpoint. Set either <code>CdcInsertsOnly</code> or <code>CdcInsertsAndUpdates</code> to <code>true</code>
* for the same endpoint, but not both.
*
*
*/
private Boolean cdcInsertsOnly;
/**
*
* A value that when nonblank causes DMS to add a column with timestamp information to the endpoint data for an
* Amazon S3 target.
*
*
*
* DMS supports the <code>TimestampColumnName</code> parameter in versions 3.1.4 and later.
*
* DMS includes an additional <code>STRING</code> column in the .csv or .parquet object files of your migrated data
* when you set <code>TimestampColumnName</code> to a nonblank value.
*
* For a full load, each row of this timestamp column contains a timestamp for when the data was transferred from
* the source to the target by DMS.
*
* For a change data capture (CDC) load, each row of the timestamp column contains the timestamp for the commit of
* that row in the source database.
*
* The string format for this timestamp column value is <code>yyyy-MM-dd HH:mm:ss.SSSSSS</code>. By default, the
* precision of this value is in microseconds. For a CDC load, the rounding of the precision depends on the commit
* timestamp supported by DMS for the source database.
*
* When the <code>AddColumnName</code> parameter is set to <code>true</code>, DMS also includes a name for the
* timestamp column that you set with <code>TimestampColumnName</code>.
*
*/
private String timestampColumnName;
/**
*
* A value that specifies the precision of any <code>TIMESTAMP</code> column values that are written to an Amazon S3
* object file in .parquet format.
*
* DMS supports the <code>ParquetTimestampInMillisecond</code> parameter in versions 3.1.4 and later.
*
* When <code>ParquetTimestampInMillisecond</code> is set to <code>true</code> or <code>y</code>, DMS writes all
* <code>TIMESTAMP</code> columns in a .parquet formatted file with millisecond precision. Otherwise, DMS writes
* them with microsecond precision.
*
* Currently, Amazon Athena and Glue can handle only millisecond precision for <code>TIMESTAMP</code> values. Set
* this parameter to <code>true</code> for S3 endpoint object files that are .parquet formatted only if you plan to
* query or process the data with Athena or Glue.
*
* DMS writes any <code>TIMESTAMP</code> column values written to an S3 file in .csv format with microsecond
* precision.
*
* Setting <code>ParquetTimestampInMillisecond</code> has no effect on the string format of the timestamp column
* value that is inserted by setting the <code>TimestampColumnName</code> parameter.
*
*
*/
private Boolean parquetTimestampInMillisecond;
/**
*
* A value that enables a change data capture (CDC) load to write INSERT and UPDATE operations to .csv or .parquet
* (columnar storage) output files. The default setting is <code>false</code>, but when
* <code>CdcInsertsAndUpdates</code> is set to <code>true</code> or <code>y</code>, only INSERTs and UPDATEs from
* the source database are migrated to the .csv or .parquet file.
*
* DMS supports the use of the .parquet files in versions 3.4.7 and later.
*
* How these INSERTs and UPDATEs are recorded depends on the value of the <code>IncludeOpForFullLoad</code>
* parameter. If <code>IncludeOpForFullLoad</code> is set to <code>true</code>, the first field of every CDC record
* is set to either <code>I</code> or <code>U</code> to indicate INSERT and UPDATE operations at the source. But if
* <code>IncludeOpForFullLoad</code> is set to <code>false</code>, CDC records are written without an indication of
* INSERT or UPDATE operations at the source. For more information about how these settings work together, see
* Indicating Source DB Operations in Migrated S3 Data in the Database Migration Service User Guide.
*
* DMS supports the use of the <code>CdcInsertsAndUpdates</code> parameter in versions 3.3.1 and later.
*
* <code>CdcInsertsOnly</code> and <code>CdcInsertsAndUpdates</code> can't both be set to <code>true</code> for the
* same endpoint. Set either <code>CdcInsertsOnly</code> or <code>CdcInsertsAndUpdates</code> to <code>true</code>
* for the same endpoint, but not both.
*
*
*/
private Boolean cdcInsertsAndUpdates;
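/*
 * Illustrative sketch of the operation-indication settings described above, assuming a CDC task that
 * should emit only INSERTs and UPDATEs while keeping the leading I/U indicator field. The with* methods
 * used here are assumed from the generated pattern of this class; this shows one valid combination, not
 * the only one.
 *
 *   S3Settings cdcOpSettings = new S3Settings()
 *           .withIncludeOpForFullLoad(true)   // record "I" for full-load rows as well
 *           .withCdcInsertsAndUpdates(true);  // migrate INSERTs and UPDATEs only (leave CdcInsertsOnly unset)
 */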
/**
*
* When set to <code>true</code>, this parameter partitions S3 bucket folders based on transaction commit dates. The
* default value is <code>false</code>. For more information about date-based folder partitioning, see Using
* date-based folder partitioning.
*
*/
private Boolean datePartitionEnabled;
/**
*
* Identifies the sequence of the date format to use during folder partitioning. The default value is
* <code>YYYYMMDD</code>. Use this parameter when <code>DatePartitionEnabled</code> is set to <code>true</code>.
*
*/
private String datePartitionSequence;
/**
*
* Specifies a date-separating delimiter to use during folder partitioning. The default value is <code>SLASH</code>.
* Use this parameter when <code>DatePartitionEnabled</code> is set to <code>true</code>.
*
*/
private String datePartitionDelimiter;
/**
*
* This setting applies if the S3 output files during a change data capture (CDC) load are written in .csv format.
* If set to <code>true</code> for columns not included in the supplemental log, DMS uses the value specified by
* <code>CsvNoSupValue</code>. If not set or set to <code>false</code>, DMS uses the null value for these
* columns.
*
*
*
* This setting is supported in DMS versions 3.4.1 and later.
*
*
*/
private Boolean useCsvNoSupValue;
/**
*
* This setting only applies if your Amazon S3 output files during a change data capture (CDC) load are written in
* .csv format. If <code>UseCsvNoSupValue</code> is set to <code>true</code>, specify a string value that you want
* DMS to use for all columns not included in the supplemental log. If you do not specify a string value, DMS uses
* the null value for these columns regardless of the <code>UseCsvNoSupValue</code> setting.
*
*
*
* This setting is supported in DMS versions 3.4.1 and later.
*
*
*/
private String csvNoSupValue;
/**
*
* If set to <code>true</code>, DMS saves the transaction order for a change data capture (CDC) load on the Amazon
* S3 target specified by <code>CdcPath</code>. For more information, see Capturing data changes (CDC) including
* transaction order on the S3 target.
*
*
*
* This setting is supported in DMS versions 3.4.2 and later.
*
*
*/
private Boolean preserveTransactions;
/**
*
* Specifies the folder path of CDC files. For an S3 source, this setting is required if a task captures change
* data; otherwise, it's optional. If <code>CdcPath</code> is set, DMS reads CDC files from this path and replicates
* the data changes to the target endpoint. For an S3 target, if you set <code>PreserveTransactions</code> to
* <code>true</code>, DMS verifies that you have set this parameter to a folder path on your S3 target where DMS
* can save the transaction order for the CDC load. DMS creates this CDC folder path in either your S3 target
* working directory or the S3 target location specified by <code>BucketFolder</code> and <code>BucketName</code>.
*
* For example, if you specify <code>CdcPath</code> as <code>MyChangedData</code>, and you specify
* <code>BucketName</code> as <code>MyTargetBucket</code> but do not specify <code>BucketFolder</code>, DMS creates
* the following CDC folder path: <code>MyTargetBucket/MyChangedData</code>.
*
* If you specify the same <code>CdcPath</code>, and you specify <code>BucketName</code> as
* <code>MyTargetBucket</code> and <code>BucketFolder</code> as <code>MyTargetData</code>, DMS creates the following
* CDC folder path: <code>MyTargetBucket/MyTargetData/MyChangedData</code>.
*
* For more information on CDC including transaction order on an S3 target, see Capturing data changes (CDC)
* including transaction order on the S3 target.
*
*
*
* This setting is supported in DMS versions 3.4.2 and later.
*
*
*/
private String cdcPath;
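/*
 * Illustrative sketch of preserving CDC transaction order on an S3 target, following the CdcPath and
 * PreserveTransactions descriptions above. The with* methods used here are assumed from the generated
 * pattern of this class; bucket and folder names come from the documentation example above.
 *
 *   S3Settings orderedCdc = new S3Settings()
 *           .withBucketName("MyTargetBucket")   // placeholder
 *           .withBucketFolder("MyTargetData")   // placeholder
 *           .withPreserveTransactions(true)
 *           .withCdcPath("MyChangedData");      // CDC files land under MyTargetBucket/MyTargetData/MyChangedData
 */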
/**
*
* When set to true, this parameter uses the task start time as the timestamp column value instead of the time data
* is written to the target. For full load, when <code>useTaskStartTimeForFullLoadTimestamp</code> is set to
* <code>true</code>, each row of the timestamp column contains the task start time. For CDC loads, each row of the
* timestamp column contains the transaction commit time.
*
* When <code>useTaskStartTimeForFullLoadTimestamp</code> is set to <code>false</code>, the full load timestamp in
* the timestamp column increments with the time data arrives at the target.
*
*/
private Boolean useTaskStartTimeForFullLoadTimestamp;
/**
*
* A value that enables DMS to specify a predefined (canned) access control list for objects created in an Amazon S3
* bucket as .csv or .parquet files. For more information about Amazon S3 canned ACLs, see Canned ACL in the
* Amazon S3 Developer Guide.
*
*
* The default value is NONE. Valid values include NONE, PRIVATE, PUBLIC_READ, PUBLIC_READ_WRITE,
* AUTHENTICATED_READ, AWS_EXEC_READ, BUCKET_OWNER_READ, and BUCKET_OWNER_FULL_CONTROL.
*
*/
private String cannedAclForObjects;
/**
*
* An optional parameter that, when set to <code>true</code> or <code>y</code>, you can use to add column name
* information to the .csv output file.
*
* The default value is <code>false</code>. Valid values are <code>true</code>, <code>false</code>, <code>y</code>,
* and <code>n</code>.
*
*/
private Boolean addColumnName;
/**
*
* Maximum length of the interval, defined in seconds, after which to output a file to Amazon S3.
*
*
* When <code>CdcMaxBatchInterval</code> and <code>CdcMinFileSize</code> are both specified, the file write is
* triggered by whichever parameter condition is met first within a DMS CloudFormation template.
*
*
* The default value is 60 seconds.
*
*/
private Integer cdcMaxBatchInterval;
/**
*
* Minimum file size, defined in kilobytes, to reach for a file output to Amazon S3.
*
*
* When <code>CdcMinFileSize</code> and <code>CdcMaxBatchInterval</code> are both specified, the file write is
* triggered by whichever parameter condition is met first within a DMS CloudFormation template.
*
*
* The default value is 32 MB.
*
*/
private Integer cdcMinFileSize;
/**
*
* An optional parameter that specifies how DMS treats null values. While handling the null value, you can use this
* parameter to pass a user-defined string as null when writing to the target. For example, when target columns are
* nullable, you can use this option to differentiate between the empty string value and the null value. So, if you
* set this parameter value to the empty string ("" or ''), DMS treats the empty string as the null value instead of
* <code>NULL</code>.
*
* The default value is <code>NULL</code>. Valid values include any valid string.
*
*/
private String csvNullValue;
/**
*
* When this value is set to 1, DMS ignores the first row header in a .csv file. A value of 1 turns on the feature;
* a value of 0 turns off the feature.
*
*
* The default is 0.
*
*/
private Integer ignoreHeaderRows;
/**
*
* A value that specifies the maximum size (in KB) of any .csv file to be created while migrating to an S3 target
* during full load.
*
*
* The default value is 1,048,576 KB (1 GB). Valid values include 1 to 1,048,576.
*
*/
private Integer maxFileSize;
/**
*
* For an S3 source, when this value is set to <code>true</code> or <code>y</code>, each leading double quotation
* mark has to be followed by an ending double quotation mark. This formatting complies with RFC 4180. When this
* value is set to <code>false</code> or <code>n</code>, string literals are copied to the target as is. In this
* case, a delimiter (row or column) signals the end of the field. Thus, you can't use a delimiter as part of the
* string, because it signals the end of the value.
*
* For an S3 target, an optional parameter used to set behavior to comply with RFC 4180 for data migrated to Amazon
* S3 using .csv file format only. When this value is set to <code>true</code> or <code>y</code> using Amazon S3 as
* a target, if the data has quotation marks or newline characters in it, DMS encloses the entire column with an
* additional pair of double quotation marks ("). Every quotation mark within the data is repeated twice.
*
* The default value is <code>true</code>. Valid values include <code>true</code>, <code>false</code>,
* <code>y</code>, and <code>n</code>.
*
*/
private Boolean rfc4180;
/**
*
* When creating an S3 target endpoint, set <code>DatePartitionTimezone</code> to convert the current UTC time into
* a specified time zone. The conversion occurs when a date partition folder is created and a CDC filename is
* generated. The time zone format is Area/Location. Use this parameter when <code>DatePartitionEnabled</code> is
* set to <code>true</code>, as shown in the following example.
*
*
* s3-settings='{"DatePartitionEnabled": true, "DatePartitionSequence": "YYYYMMDDHH", "DatePartitionDelimiter": "SLASH", "DatePartitionTimezone":"Asia/Seoul", "BucketName": "dms-nattarat-test"}'
*
*/
private String datePartitionTimezone;
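/*
 * Illustrative sketch of the date-based folder partitioning settings above, mirroring the s3-settings
 * JSON example with this class's with* methods. The with* methods used here are assumed from the
 * generated pattern of this class; the sequence, delimiter, time zone, and bucket values are the ones
 * from that example and are placeholders.
 *
 *   S3Settings partitioned = new S3Settings()
 *           .withDatePartitionEnabled(true)
 *           .withDatePartitionSequence("YYYYMMDDHH")
 *           .withDatePartitionDelimiter("SLASH")
 *           .withDatePartitionTimezone("Asia/Seoul")
 *           .withBucketName("dms-nattarat-test");   // bucket name from the example above
 */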
/**
*
* Use the S3 target endpoint setting <code>AddTrailingPaddingCharacter</code> to add padding on string data. The
* default value is <code>false</code>.
*
*/
private Boolean addTrailingPaddingCharacter;
/**
*
* To specify a bucket owner and prevent sniping, you can use the <code>ExpectedBucketOwner</code> endpoint setting.
*
*
* Example: --s3-settings='{"ExpectedBucketOwner": "AWS_Account_ID"}'
*
*
* When you make a request to test a connection or perform a migration, S3 checks the account ID of the bucket owner
* against the specified parameter.
*
*/
private String expectedBucketOwner;
/**
*
* When true, allows Glue to catalog your S3 bucket. Creating a Glue catalog lets you use Athena to query your
* data.
*
*/
private Boolean glueCatalogGeneration;
/**
*
* The Amazon Resource Name (ARN) used by the service to access the IAM role. The role must allow the
* <code>iam:PassRole</code> action. It is a required parameter that enables DMS to write and read objects from an
* S3 bucket.
*
*
* @param serviceAccessRoleArn
* The Amazon Resource Name (ARN) used by the service to access the IAM role. The role must allow the
* <code>iam:PassRole</code> action. It is a required parameter that enables DMS to write and read objects
* from an S3 bucket.
*/
public void setServiceAccessRoleArn(String serviceAccessRoleArn) {
this.serviceAccessRoleArn = serviceAccessRoleArn;
}
/**
*
* The Amazon Resource Name (ARN) used by the service to access the IAM role. The role must allow the
* <code>iam:PassRole</code> action. It is a required parameter that enables DMS to write and read objects from an
* S3 bucket.
*
*
* @return The Amazon Resource Name (ARN) used by the service to access the IAM role. The role must allow the
*         <code>iam:PassRole</code> action. It is a required parameter that enables DMS to write and read objects
*         from an S3 bucket.
*/
public String getServiceAccessRoleArn() {
return this.serviceAccessRoleArn;
}
/**
*
* The Amazon Resource Name (ARN) used by the service to access the IAM role. The role must allow the
* <code>iam:PassRole</code> action. It is a required parameter that enables DMS to write and read objects from an
* S3 bucket.
*
*
* @param serviceAccessRoleArn
* The Amazon Resource Name (ARN) used by the service to access the IAM role. The role must allow the
* <code>iam:PassRole</code> action. It is a required parameter that enables DMS to write and read objects
* from an S3 bucket.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public S3Settings withServiceAccessRoleArn(String serviceAccessRoleArn) {
setServiceAccessRoleArn(serviceAccessRoleArn);
return this;
}
/**
*
* Specifies how tables are defined in the S3 source files only.
*
*
* @param externalTableDefinition
* Specifies how tables are defined in the S3 source files only.
*/
public void setExternalTableDefinition(String externalTableDefinition) {
this.externalTableDefinition = externalTableDefinition;
}
/**
*
* Specifies how tables are defined in the S3 source files only.
*
*
* @return Specifies how tables are defined in the S3 source files only.
*/
public String getExternalTableDefinition() {
return this.externalTableDefinition;
}
/**
*
* Specifies how tables are defined in the S3 source files only.
*
*
* @param externalTableDefinition
* Specifies how tables are defined in the S3 source files only.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public S3Settings withExternalTableDefinition(String externalTableDefinition) {
setExternalTableDefinition(externalTableDefinition);
return this;
}
/**
*
* The delimiter used to separate rows in the .csv file for both source and target. The default is a carriage return
* (\n
).
*
*
* @param csvRowDelimiter
* The delimiter used to separate rows in the .csv file for both source and target. The default is a carriage
* return (\n
).
*/
public void setCsvRowDelimiter(String csvRowDelimiter) {
this.csvRowDelimiter = csvRowDelimiter;
}
/**
*
* The delimiter used to separate rows in the .csv file for both source and target. The default is a carriage return
* (\n
).
*
*
* @return The delimiter used to separate rows in the .csv file for both source and target. The default is a
* carriage return (\n
).
*/
public String getCsvRowDelimiter() {
return this.csvRowDelimiter;
}
/**
*
* The delimiter used to separate rows in the .csv file for both source and target. The default is a carriage return
* (\n
).
*
*
* @param csvRowDelimiter
* The delimiter used to separate rows in the .csv file for both source and target. The default is a carriage
* return (\n
).
* @return Returns a reference to this object so that method calls can be chained together.
*/
public S3Settings withCsvRowDelimiter(String csvRowDelimiter) {
setCsvRowDelimiter(csvRowDelimiter);
return this;
}
/**
*
* The delimiter used to separate columns in the .csv file for both source and target. The default is a comma.
*
*
* @param csvDelimiter
* The delimiter used to separate columns in the .csv file for both source and target. The default is a
* comma.
*/
public void setCsvDelimiter(String csvDelimiter) {
this.csvDelimiter = csvDelimiter;
}
/**
*
* The delimiter used to separate columns in the .csv file for both source and target. The default is a comma.
*
*
* @return The delimiter used to separate columns in the .csv file for both source and target. The default is a
* comma.
*/
public String getCsvDelimiter() {
return this.csvDelimiter;
}
/**
*
* The delimiter used to separate columns in the .csv file for both source and target. The default is a comma.
*
*
* @param csvDelimiter
* The delimiter used to separate columns in the .csv file for both source and target. The default is a
* comma.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public S3Settings withCsvDelimiter(String csvDelimiter) {
setCsvDelimiter(csvDelimiter);
return this;
}
/**
*
* An optional parameter to set a folder name in the S3 bucket. If provided, tables are created in the path
* bucketFolder/schema_name/table_name/
. If this parameter isn't specified, then
* the path used is schema_name/table_name/
.
*
*
* @param bucketFolder
* An optional parameter to set a folder name in the S3 bucket. If provided, tables are created in the path
* bucketFolder/schema_name/table_name/
. If this parameter isn't
* specified, then the path used is schema_name/table_name/
.
*/
public void setBucketFolder(String bucketFolder) {
this.bucketFolder = bucketFolder;
}
/**
*
* An optional parameter to set a folder name in the S3 bucket. If provided, tables are created in the path
* bucketFolder/schema_name/table_name/
. If this parameter isn't specified, then
* the path used is schema_name/table_name/
.
*
*
* @return An optional parameter to set a folder name in the S3 bucket. If provided, tables are created in the path
* bucketFolder/schema_name/table_name/
. If this parameter isn't
* specified, then the path used is schema_name/table_name/
.
*/
public String getBucketFolder() {
return this.bucketFolder;
}
/**
*
* An optional parameter to set a folder name in the S3 bucket. If provided, tables are created in the path
* bucketFolder/schema_name/table_name/
. If this parameter isn't specified, then
* the path used is schema_name/table_name/
.
*
*
* @param bucketFolder
* An optional parameter to set a folder name in the S3 bucket. If provided, tables are created in the path
* bucketFolder/schema_name/table_name/
. If this parameter isn't
* specified, then the path used is schema_name/table_name/
.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public S3Settings withBucketFolder(String bucketFolder) {
setBucketFolder(bucketFolder);
return this;
}
/**
*
* The name of the S3 bucket.
*
*
* @param bucketName
* The name of the S3 bucket.
*/
public void setBucketName(String bucketName) {
this.bucketName = bucketName;
}
/**
*
* The name of the S3 bucket.
*
*
* @return The name of the S3 bucket.
*/
public String getBucketName() {
return this.bucketName;
}
/**
*
* The name of the S3 bucket.
*
*
* @param bucketName
* The name of the S3 bucket.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public S3Settings withBucketName(String bucketName) {
setBucketName(bucketName);
return this;
}
/**
*
* An optional parameter to use GZIP to compress the target files. Set to GZIP to compress the target files. Either
* set this parameter to NONE (the default) or don't use it to leave the files uncompressed. This parameter applies
* to both .csv and .parquet file formats.
*
*
* @param compressionType
* An optional parameter to use GZIP to compress the target files. Set to GZIP to compress the target files.
* Either set this parameter to NONE (the default) or don't use it to leave the files uncompressed. This
* parameter applies to both .csv and .parquet file formats.
* @see CompressionTypeValue
*/
public void setCompressionType(String compressionType) {
this.compressionType = compressionType;
}
/**
*
* An optional parameter to use GZIP to compress the target files. Set to GZIP to compress the target files. Either
* set this parameter to NONE (the default) or don't use it to leave the files uncompressed. This parameter applies
* to both .csv and .parquet file formats.
*
*
* @return An optional parameter to use GZIP to compress the target files. Set to GZIP to compress the target files.
* Either set this parameter to NONE (the default) or don't use it to leave the files uncompressed. This
* parameter applies to both .csv and .parquet file formats.
* @see CompressionTypeValue
*/
public String getCompressionType() {
return this.compressionType;
}
/**
*
* An optional parameter to use GZIP to compress the target files. Set to GZIP to compress the target files. Either
* set this parameter to NONE (the default) or don't use it to leave the files uncompressed. This parameter applies
* to both .csv and .parquet file formats.
*
*
* @param compressionType
* An optional parameter to use GZIP to compress the target files. Set to GZIP to compress the target files.
* Either set this parameter to NONE (the default) or don't use it to leave the files uncompressed. This
* parameter applies to both .csv and .parquet file formats.
* @return Returns a reference to this object so that method calls can be chained together.
* @see CompressionTypeValue
*/
public S3Settings withCompressionType(String compressionType) {
setCompressionType(compressionType);
return this;
}
/**
*
* An optional parameter to use GZIP to compress the target files. Set to GZIP to compress the target files. Either
* set this parameter to NONE (the default) or don't use it to leave the files uncompressed. This parameter applies
* to both .csv and .parquet file formats.
*
*
* @param compressionType
* An optional parameter to use GZIP to compress the target files. Set to GZIP to compress the target files.
* Either set this parameter to NONE (the default) or don't use it to leave the files uncompressed. This
* parameter applies to both .csv and .parquet file formats.
* @see CompressionTypeValue
*/
public void setCompressionType(CompressionTypeValue compressionType) {
withCompressionType(compressionType);
}
/**
*
* An optional parameter to use GZIP to compress the target files. Set to GZIP to compress the target files. Either
* set this parameter to NONE (the default) or don't use it to leave the files uncompressed. This parameter applies
* to both .csv and .parquet file formats.
*
*
* @param compressionType
* An optional parameter to use GZIP to compress the target files. Set to GZIP to compress the target files.
* Either set this parameter to NONE (the default) or don't use it to leave the files uncompressed. This
* parameter applies to both .csv and .parquet file formats.
* @return Returns a reference to this object so that method calls can be chained together.
* @see CompressionTypeValue
*/
public S3Settings withCompressionType(CompressionTypeValue compressionType) {
this.compressionType = compressionType.toString();
return this;
}
/**
*
* The type of server-side encryption that you want to use for your data. This encryption type is part of the
* endpoint settings or the extra connections attributes for Amazon S3. You can choose either SSE_S3
* (the default) or SSE_KMS
.
*
*
*
* For the ModifyEndpoint
operation, you can change the existing value of the
* EncryptionMode
parameter from SSE_KMS
to SSE_S3
. But you can’t change the
* existing value from SSE_S3
to SSE_KMS
.
*
*
*
* To use SSE_S3
, you need an Identity and Access Management (IAM) role with permission to allow
* "arn:aws:s3:::dms-*"
to use the following actions:
*
*
* -
*
* s3:CreateBucket
*
*
* -
*
* s3:ListBucket
*
*
* -
*
* s3:DeleteBucket
*
*
* -
*
* s3:GetBucketLocation
*
*
* -
*
* s3:GetObject
*
*
* -
*
* s3:PutObject
*
*
* -
*
* s3:DeleteObject
*
*
* -
*
* s3:GetObjectVersion
*
*
* -
*
* s3:GetBucketPolicy
*
*
* -
*
* s3:PutBucketPolicy
*
*
* -
*
* s3:DeleteBucketPolicy
*
*
*
*
* @param encryptionMode
* The type of server-side encryption that you want to use for your data. This encryption type is part of the
* endpoint settings or the extra connections attributes for Amazon S3. You can choose either
* SSE_S3
(the default) or SSE_KMS
.
*
* For the ModifyEndpoint
operation, you can change the existing value of the
* EncryptionMode
parameter from SSE_KMS
to SSE_S3
. But you can’t
* change the existing value from SSE_S3
to SSE_KMS
.
*
*
*
* To use SSE_S3
, you need an Identity and Access Management (IAM) role with permission to allow
* "arn:aws:s3:::dms-*"
to use the following actions:
*
*
* -
*
* s3:CreateBucket
*
*
* -
*
* s3:ListBucket
*
*
* -
*
* s3:DeleteBucket
*
*
* -
*
* s3:GetBucketLocation
*
*
* -
*
* s3:GetObject
*
*
* -
*
* s3:PutObject
*
*
* -
*
* s3:DeleteObject
*
*
* -
*
* s3:GetObjectVersion
*
*
* -
*
* s3:GetBucketPolicy
*
*
* -
*
* s3:PutBucketPolicy
*
*
* -
*
* s3:DeleteBucketPolicy
*
*
* @see EncryptionModeValue
*/
public void setEncryptionMode(String encryptionMode) {
this.encryptionMode = encryptionMode;
}
/**
*
* The type of server-side encryption that you want to use for your data. This encryption type is part of the
* endpoint settings or the extra connections attributes for Amazon S3. You can choose either SSE_S3
* (the default) or SSE_KMS
.
*
*
*
* For the ModifyEndpoint
operation, you can change the existing value of the
* EncryptionMode
parameter from SSE_KMS
to SSE_S3
. But you can’t change the
* existing value from SSE_S3
to SSE_KMS
.
*
*
*
* To use SSE_S3
, you need an Identity and Access Management (IAM) role with permission to allow
* "arn:aws:s3:::dms-*"
to use the following actions:
*
*
* -
*
* s3:CreateBucket
*
*
* -
*
* s3:ListBucket
*
*
* -
*
* s3:DeleteBucket
*
*
* -
*
* s3:GetBucketLocation
*
*
* -
*
* s3:GetObject
*
*
* -
*
* s3:PutObject
*
*
* -
*
* s3:DeleteObject
*
*
* -
*
* s3:GetObjectVersion
*
*
* -
*
* s3:GetBucketPolicy
*
*
* -
*
* s3:PutBucketPolicy
*
*
* -
*
* s3:DeleteBucketPolicy
*
*
*
*
* @return The type of server-side encryption that you want to use for your data. This encryption type is part of
* the endpoint settings or the extra connections attributes for Amazon S3. You can choose either
* SSE_S3
(the default) or SSE_KMS
.
*
* For the ModifyEndpoint
operation, you can change the existing value of the
* EncryptionMode
parameter from SSE_KMS
to SSE_S3
. But you can’t
* change the existing value from SSE_S3
to SSE_KMS
.
*
*
*
* To use SSE_S3
, you need an Identity and Access Management (IAM) role with permission to
* allow "arn:aws:s3:::dms-*"
to use the following actions:
*
*
* -
*
* s3:CreateBucket
*
*
* -
*
* s3:ListBucket
*
*
* -
*
* s3:DeleteBucket
*
*
* -
*
* s3:GetBucketLocation
*
*
* -
*
* s3:GetObject
*
*
* -
*
* s3:PutObject
*
*
* -
*
* s3:DeleteObject
*
*
* -
*
* s3:GetObjectVersion
*
*
* -
*
* s3:GetBucketPolicy
*
*
* -
*
* s3:PutBucketPolicy
*
*
* -
*
* s3:DeleteBucketPolicy
*
*
* @see EncryptionModeValue
*/
public String getEncryptionMode() {
return this.encryptionMode;
}
/**
*
* The type of server-side encryption that you want to use for your data. This encryption type is part of the
* endpoint settings or the extra connections attributes for Amazon S3. You can choose either SSE_S3
* (the default) or SSE_KMS
.
*
*
*
* For the ModifyEndpoint
operation, you can change the existing value of the
* EncryptionMode
parameter from SSE_KMS
to SSE_S3
. But you can’t change the
* existing value from SSE_S3
to SSE_KMS
.
*
*
*
* To use SSE_S3
, you need an Identity and Access Management (IAM) role with permission to allow
* "arn:aws:s3:::dms-*"
to use the following actions:
*
*
* -
*
* s3:CreateBucket
*
*
* -
*
* s3:ListBucket
*
*
* -
*
* s3:DeleteBucket
*
*
* -
*
* s3:GetBucketLocation
*
*
* -
*
* s3:GetObject
*
*
* -
*
* s3:PutObject
*
*
* -
*
* s3:DeleteObject
*
*
* -
*
* s3:GetObjectVersion
*
*
* -
*
* s3:GetBucketPolicy
*
*
* -
*
* s3:PutBucketPolicy
*
*
* -
*
* s3:DeleteBucketPolicy
*
*
*
*
* @param encryptionMode
* The type of server-side encryption that you want to use for your data. This encryption type is part of the
* endpoint settings or the extra connections attributes for Amazon S3. You can choose either
* SSE_S3
(the default) or SSE_KMS
.
*
* For the ModifyEndpoint
operation, you can change the existing value of the
* EncryptionMode
parameter from SSE_KMS
to SSE_S3
. But you can’t
* change the existing value from SSE_S3
to SSE_KMS
.
*
*
*
* To use SSE_S3
, you need an Identity and Access Management (IAM) role with permission to allow
* "arn:aws:s3:::dms-*"
to use the following actions:
*
*
* -
*
* s3:CreateBucket
*
*
* -
*
* s3:ListBucket
*
*
* -
*
* s3:DeleteBucket
*
*
* -
*
* s3:GetBucketLocation
*
*
* -
*
* s3:GetObject
*
*
* -
*
* s3:PutObject
*
*
* -
*
* s3:DeleteObject
*
*
* -
*
* s3:GetObjectVersion
*
*
* -
*
* s3:GetBucketPolicy
*
*
* -
*
* s3:PutBucketPolicy
*
*
* -
*
* s3:DeleteBucketPolicy
*
*
* @return Returns a reference to this object so that method calls can be chained together.
* @see EncryptionModeValue
*/
public S3Settings withEncryptionMode(String encryptionMode) {
setEncryptionMode(encryptionMode);
return this;
}
/**
*
* The type of server-side encryption that you want to use for your data. This encryption type is part of the
* endpoint settings or the extra connections attributes for Amazon S3. You can choose either SSE_S3
* (the default) or SSE_KMS
.
*
*
*
* For the ModifyEndpoint
operation, you can change the existing value of the
* EncryptionMode
parameter from SSE_KMS
to SSE_S3
. But you can’t change the
* existing value from SSE_S3
to SSE_KMS
.
*
*
*
* To use SSE_S3
, you need an Identity and Access Management (IAM) role with permission to allow
* "arn:aws:s3:::dms-*"
to use the following actions:
*
*
* -
*
* s3:CreateBucket
*
*
* -
*
* s3:ListBucket
*
*
* -
*
* s3:DeleteBucket
*
*
* -
*
* s3:GetBucketLocation
*
*
* -
*
* s3:GetObject
*
*
* -
*
* s3:PutObject
*
*
* -
*
* s3:DeleteObject
*
*
* -
*
* s3:GetObjectVersion
*
*
* -
*
* s3:GetBucketPolicy
*
*
* -
*
* s3:PutBucketPolicy
*
*
* -
*
* s3:DeleteBucketPolicy
*
*
*
*
* @param encryptionMode
* The type of server-side encryption that you want to use for your data. This encryption type is part of the
* endpoint settings or the extra connections attributes for Amazon S3. You can choose either
* SSE_S3
(the default) or SSE_KMS
.
*
* For the ModifyEndpoint
operation, you can change the existing value of the
* EncryptionMode
parameter from SSE_KMS
to SSE_S3
. But you can’t
* change the existing value from SSE_S3
to SSE_KMS
.
*
*
*
* To use SSE_S3
, you need an Identity and Access Management (IAM) role with permission to allow
* "arn:aws:s3:::dms-*"
to use the following actions:
*
*
* -
*
* s3:CreateBucket
*
*
* -
*
* s3:ListBucket
*
*
* -
*
* s3:DeleteBucket
*
*
* -
*
* s3:GetBucketLocation
*
*
* -
*
* s3:GetObject
*
*
* -
*
* s3:PutObject
*
*
* -
*
* s3:DeleteObject
*
*
* -
*
* s3:GetObjectVersion
*
*
* -
*
* s3:GetBucketPolicy
*
*
* -
*
* s3:PutBucketPolicy
*
*
* -
*
* s3:DeleteBucketPolicy
*
*
* @see EncryptionModeValue
*/
public void setEncryptionMode(EncryptionModeValue encryptionMode) {
withEncryptionMode(encryptionMode);
}
/**
*
* The type of server-side encryption that you want to use for your data. This encryption type is part of the
* endpoint settings or the extra connections attributes for Amazon S3. You can choose either SSE_S3
* (the default) or SSE_KMS
.
*
*
*
* For the ModifyEndpoint
operation, you can change the existing value of the
* EncryptionMode
parameter from SSE_KMS
to SSE_S3
. But you can’t change the
* existing value from SSE_S3
to SSE_KMS
.
*
*
*
* To use SSE_S3
, you need an Identity and Access Management (IAM) role with permission to allow
* "arn:aws:s3:::dms-*"
to use the following actions:
*
*
* -
*
* s3:CreateBucket
*
*
* -
*
* s3:ListBucket
*
*
* -
*
* s3:DeleteBucket
*
*
* -
*
* s3:GetBucketLocation
*
*
* -
*
* s3:GetObject
*
*
* -
*
* s3:PutObject
*
*
* -
*
* s3:DeleteObject
*
*
* -
*
* s3:GetObjectVersion
*
*
* -
*
* s3:GetBucketPolicy
*
*
* -
*
* s3:PutBucketPolicy
*
*
* -
*
* s3:DeleteBucketPolicy
*
*
*
*
* @param encryptionMode
* The type of server-side encryption that you want to use for your data. This encryption type is part of the
* endpoint settings or the extra connections attributes for Amazon S3. You can choose either
* SSE_S3
(the default) or SSE_KMS
.
*
* For the ModifyEndpoint
operation, you can change the existing value of the
* EncryptionMode
parameter from SSE_KMS
to SSE_S3
. But you can’t
* change the existing value from SSE_S3
to SSE_KMS
.
*
*
*
* To use SSE_S3
, you need an Identity and Access Management (IAM) role with permission to allow
* "arn:aws:s3:::dms-*"
to use the following actions:
*
*
* -
*
* s3:CreateBucket
*
*
* -
*
* s3:ListBucket
*
*
* -
*
* s3:DeleteBucket
*
*
* -
*
* s3:GetBucketLocation
*
*
* -
*
* s3:GetObject
*
*
* -
*
* s3:PutObject
*
*
* -
*
* s3:DeleteObject
*
*
* -
*
* s3:GetObjectVersion
*
*
* -
*
* s3:GetBucketPolicy
*
*
* -
*
* s3:PutBucketPolicy
*
*
* -
*
* s3:DeleteBucketPolicy
*
*
* @return Returns a reference to this object so that method calls can be chained together.
* @see EncryptionModeValue
*/
public S3Settings withEncryptionMode(EncryptionModeValue encryptionMode) {
this.encryptionMode = encryptionMode.toString();
return this;
}
/**
*
* If you are using SSE_KMS
for the EncryptionMode
, provide the KMS key ID. The key that
* you use needs an attached policy that enables Identity and Access Management (IAM) user permissions and allows
* use of the key.
*
*
* Here is a CLI example:
* aws dms create-endpoint --endpoint-identifier value --endpoint-type target --engine-name s3 --s3-settings ServiceAccessRoleArn=value,BucketFolder=value,BucketName=value,EncryptionMode=SSE_KMS,ServerSideEncryptionKmsKeyId=value
*
*
* @param serverSideEncryptionKmsKeyId
* If you are using SSE_KMS
for the EncryptionMode
, provide the KMS key ID. The key
* that you use needs an attached policy that enables Identity and Access Management (IAM) user permissions
* and allows use of the key.
*
* Here is a CLI example:
* aws dms create-endpoint --endpoint-identifier value --endpoint-type target --engine-name s3 --s3-settings ServiceAccessRoleArn=value,BucketFolder=value,BucketName=value,EncryptionMode=SSE_KMS,ServerSideEncryptionKmsKeyId=value
*/
public void setServerSideEncryptionKmsKeyId(String serverSideEncryptionKmsKeyId) {
this.serverSideEncryptionKmsKeyId = serverSideEncryptionKmsKeyId;
}
/**
*
* If you are using SSE_KMS
for the EncryptionMode
, provide the KMS key ID. The key that
* you use needs an attached policy that enables Identity and Access Management (IAM) user permissions and allows
* use of the key.
*
*
* Here is a CLI example:
* aws dms create-endpoint --endpoint-identifier value --endpoint-type target --engine-name s3 --s3-settings ServiceAccessRoleArn=value,BucketFolder=value,BucketName=value,EncryptionMode=SSE_KMS,ServerSideEncryptionKmsKeyId=value
*
*
* @return If you are using SSE_KMS
for the EncryptionMode
, provide the KMS key ID. The
* key that you use needs an attached policy that enables Identity and Access Management (IAM) user
* permissions and allows use of the key.
*
* Here is a CLI example:
* aws dms create-endpoint --endpoint-identifier value --endpoint-type target --engine-name s3 --s3-settings ServiceAccessRoleArn=value,BucketFolder=value,BucketName=value,EncryptionMode=SSE_KMS,ServerSideEncryptionKmsKeyId=value
*/
public String getServerSideEncryptionKmsKeyId() {
return this.serverSideEncryptionKmsKeyId;
}
/**
*
* If you are using SSE_KMS
for the EncryptionMode
, provide the KMS key ID. The key that
* you use needs an attached policy that enables Identity and Access Management (IAM) user permissions and allows
* use of the key.
*
*
* Here is a CLI example:
* aws dms create-endpoint --endpoint-identifier value --endpoint-type target --engine-name s3 --s3-settings ServiceAccessRoleArn=value,BucketFolder=value,BucketName=value,EncryptionMode=SSE_KMS,ServerSideEncryptionKmsKeyId=value
*
*
* @param serverSideEncryptionKmsKeyId
* If you are using SSE_KMS
for the EncryptionMode
, provide the KMS key ID. The key
* that you use needs an attached policy that enables Identity and Access Management (IAM) user permissions
* and allows use of the key.
*
* Here is a CLI example:
* aws dms create-endpoint --endpoint-identifier value --endpoint-type target --engine-name s3 --s3-settings ServiceAccessRoleArn=value,BucketFolder=value,BucketName=value,EncryptionMode=SSE_KMS,ServerSideEncryptionKmsKeyId=value
* @return Returns a reference to this object so that method calls can be chained together.
*/
public S3Settings withServerSideEncryptionKmsKeyId(String serverSideEncryptionKmsKeyId) {
setServerSideEncryptionKmsKeyId(serverSideEncryptionKmsKeyId);
return this;
}
/**
*
* The format of the data that you want to use for output. You can choose one of the following:
*
*
* -
*
* csv
: This is a row-based file format with comma-separated values (.csv).
*
*
* -
*
* parquet
: Apache Parquet (.parquet) is a columnar storage file format that features efficient
* compression and provides faster query response.
*
*
*
*
* @param dataFormat
* The format of the data that you want to use for output. You can choose one of the following:
*
* -
*
* csv
: This is a row-based file format with comma-separated values (.csv).
*
*
* -
*
* parquet
: Apache Parquet (.parquet) is a columnar storage file format that features efficient
* compression and provides faster query response.
*
*
* @see DataFormatValue
*/
public void setDataFormat(String dataFormat) {
this.dataFormat = dataFormat;
}
/**
*
* The format of the data that you want to use for output. You can choose one of the following:
*
*
* -
*
* csv
: This is a row-based file format with comma-separated values (.csv).
*
*
* -
*
* parquet
: Apache Parquet (.parquet) is a columnar storage file format that features efficient
* compression and provides faster query response.
*
*
*
*
* @return The format of the data that you want to use for output. You can choose one of the following:
*
* -
*
* csv
: This is a row-based file format with comma-separated values (.csv).
*
*
* -
*
* parquet
: Apache Parquet (.parquet) is a columnar storage file format that features
* efficient compression and provides faster query response.
*
*
* @see DataFormatValue
*/
public String getDataFormat() {
return this.dataFormat;
}
/**
*
* The format of the data that you want to use for output. You can choose one of the following:
*
*
* -
*
* csv
: This is a row-based file format with comma-separated values (.csv).
*
*
* -
*
* parquet
: Apache Parquet (.parquet) is a columnar storage file format that features efficient
* compression and provides faster query response.
*
*
*
*
* @param dataFormat
* The format of the data that you want to use for output. You can choose one of the following:
*
* -
*
* csv
: This is a row-based file format with comma-separated values (.csv).
*
*
* -
*
* parquet
: Apache Parquet (.parquet) is a columnar storage file format that features efficient
* compression and provides faster query response.
*
*
* @return Returns a reference to this object so that method calls can be chained together.
* @see DataFormatValue
*/
public S3Settings withDataFormat(String dataFormat) {
setDataFormat(dataFormat);
return this;
}
/**
*
* The format of the data that you want to use for output. You can choose one of the following:
*
*
* -
*
* csv
: This is a row-based file format with comma-separated values (.csv).
*
*
* -
*
* parquet
: Apache Parquet (.parquet) is a columnar storage file format that features efficient
* compression and provides faster query response.
*
*
*
*
* @param dataFormat
* The format of the data that you want to use for output. You can choose one of the following:
*
* -
*
* csv
: This is a row-based file format with comma-separated values (.csv).
*
*
* -
*
* parquet
: Apache Parquet (.parquet) is a columnar storage file format that features efficient
* compression and provides faster query response.
*
*
* @see DataFormatValue
*/
public void setDataFormat(DataFormatValue dataFormat) {
withDataFormat(dataFormat);
}
/**
*
* The format of the data that you want to use for output. You can choose one of the following:
*
*
* -
*
* csv
: This is a row-based file format with comma-separated values (.csv).
*
*
* -
*
* parquet
: Apache Parquet (.parquet) is a columnar storage file format that features efficient
* compression and provides faster query response.
*
*
*
*
* @param dataFormat
* The format of the data that you want to use for output. You can choose one of the following:
*
* -
*
* csv
: This is a row-based file format with comma-separated values (.csv).
*
*
* -
*
* parquet
: Apache Parquet (.parquet) is a columnar storage file format that features efficient
* compression and provides faster query response.
*
*
* @return Returns a reference to this object so that method calls can be chained together.
* @see DataFormatValue
*/
public S3Settings withDataFormat(DataFormatValue dataFormat) {
this.dataFormat = dataFormat.toString();
return this;
}
/**
*
* The type of encoding you are using:
*
*
* -
*
* RLE_DICTIONARY
uses a combination of bit-packing and run-length encoding to store repeated values
* more efficiently. This is the default.
*
*
* -
*
* PLAIN
doesn't use encoding at all. Values are stored as they are.
*
*
* -
*
* PLAIN_DICTIONARY
builds a dictionary of the values encountered in a given column. The dictionary is
* stored in a dictionary page for each column chunk.
*
*
*
*
* @param encodingType
* The type of encoding you are using:
*
* -
*
* RLE_DICTIONARY
uses a combination of bit-packing and run-length encoding to store repeated
* values more efficiently. This is the default.
*
*
* -
*
* PLAIN
doesn't use encoding at all. Values are stored as they are.
*
*
* -
*
* PLAIN_DICTIONARY
builds a dictionary of the values encountered in a given column. The
* dictionary is stored in a dictionary page for each column chunk.
*
*
* @see EncodingTypeValue
*/
public void setEncodingType(String encodingType) {
this.encodingType = encodingType;
}
/**
*
* The type of encoding you are using:
*
*
* -
*
* RLE_DICTIONARY
uses a combination of bit-packing and run-length encoding to store repeated values
* more efficiently. This is the default.
*
*
* -
*
* PLAIN
doesn't use encoding at all. Values are stored as they are.
*
*
* -
*
* PLAIN_DICTIONARY
builds a dictionary of the values encountered in a given column. The dictionary is
* stored in a dictionary page for each column chunk.
*
*
*
*
* @return The type of encoding you are using:
*
* -
*
* RLE_DICTIONARY
uses a combination of bit-packing and run-length encoding to store repeated
* values more efficiently. This is the default.
*
*
* -
*
* PLAIN
doesn't use encoding at all. Values are stored as they are.
*
*
* -
*
* PLAIN_DICTIONARY
builds a dictionary of the values encountered in a given column. The
* dictionary is stored in a dictionary page for each column chunk.
*
*
* @see EncodingTypeValue
*/
public String getEncodingType() {
return this.encodingType;
}
/**
*
* The type of encoding you are using:
*
*
* -
*
* RLE_DICTIONARY
uses a combination of bit-packing and run-length encoding to store repeated values
* more efficiently. This is the default.
*
*
* -
*
* PLAIN
doesn't use encoding at all. Values are stored as they are.
*
*
* -
*
* PLAIN_DICTIONARY
builds a dictionary of the values encountered in a given column. The dictionary is
* stored in a dictionary page for each column chunk.
*
*
*
*
* @param encodingType
* The type of encoding you are using:
*
* -
*
* RLE_DICTIONARY
uses a combination of bit-packing and run-length encoding to store repeated
* values more efficiently. This is the default.
*
*
* -
*
* PLAIN
doesn't use encoding at all. Values are stored as they are.
*
*
* -
*
* PLAIN_DICTIONARY
builds a dictionary of the values encountered in a given column. The
* dictionary is stored in a dictionary page for each column chunk.
*
*
* @return Returns a reference to this object so that method calls can be chained together.
* @see EncodingTypeValue
*/
public S3Settings withEncodingType(String encodingType) {
setEncodingType(encodingType);
return this;
}
/**
*
* The type of encoding you are using:
*
*
* -
*
* RLE_DICTIONARY
uses a combination of bit-packing and run-length encoding to store repeated values
* more efficiently. This is the default.
*
*
* -
*
* PLAIN
doesn't use encoding at all. Values are stored as they are.
*
*
* -
*
* PLAIN_DICTIONARY
builds a dictionary of the values encountered in a given column. The dictionary is
* stored in a dictionary page for each column chunk.
*
*
*
*
* @param encodingType
* The type of encoding you are using:
*
* -
*
* RLE_DICTIONARY
uses a combination of bit-packing and run-length encoding to store repeated
* values more efficiently. This is the default.
*
*
* -
*
* PLAIN
doesn't use encoding at all. Values are stored as they are.
*
*
* -
*
* PLAIN_DICTIONARY
builds a dictionary of the values encountered in a given column. The
* dictionary is stored in a dictionary page for each column chunk.
*
*
* @see EncodingTypeValue
*/
public void setEncodingType(EncodingTypeValue encodingType) {
withEncodingType(encodingType);
}
/**
*
* The type of encoding you are using:
*
*
* -
*
* RLE_DICTIONARY
uses a combination of bit-packing and run-length encoding to store repeated values
* more efficiently. This is the default.
*
*
* -
*
* PLAIN
doesn't use encoding at all. Values are stored as they are.
*
*
* -
*
* PLAIN_DICTIONARY
builds a dictionary of the values encountered in a given column. The dictionary is
* stored in a dictionary page for each column chunk.
*
*
*
*
* @param encodingType
* The type of encoding you are using:
*
* -
*
* RLE_DICTIONARY
uses a combination of bit-packing and run-length encoding to store repeated
* values more efficiently. This is the default.
*
*
* -
*
* PLAIN
doesn't use encoding at all. Values are stored as they are.
*
*
* -
*
* PLAIN_DICTIONARY
builds a dictionary of the values encountered in a given column. The
* dictionary is stored in a dictionary page for each column chunk.
*
*
* @return Returns a reference to this object so that method calls can be chained together.
* @see EncodingTypeValue
*/
public S3Settings withEncodingType(EncodingTypeValue encodingType) {
this.encodingType = encodingType.toString();
return this;
}
/**
*
* The maximum size of an encoded dictionary page of a column. If the dictionary page exceeds this, this column is
* stored using an encoding type of PLAIN
. This parameter defaults to 1024 * 1024 bytes (1 MiB), the
* maximum size of a dictionary page before it reverts to PLAIN
encoding. This size is used for
* .parquet file format only.
*
*
* @param dictPageSizeLimit
* The maximum size of an encoded dictionary page of a column. If the dictionary page exceeds this, this
* column is stored using an encoding type of PLAIN
. This parameter defaults to 1024 * 1024
* bytes (1 MiB), the maximum size of a dictionary page before it reverts to PLAIN
encoding.
* This size is used for .parquet file format only.
*/
public void setDictPageSizeLimit(Integer dictPageSizeLimit) {
this.dictPageSizeLimit = dictPageSizeLimit;
}
/**
*
* The maximum size of an encoded dictionary page of a column. If the dictionary page exceeds this, this column is
* stored using an encoding type of PLAIN. This parameter defaults to 1024 * 1024 bytes (1 MiB), the maximum size of
* a dictionary page before it reverts to PLAIN encoding. This size is used for .parquet file format only.
*
* @return The maximum size of an encoded dictionary page of a column. If the dictionary page exceeds this, this
*         column is stored using an encoding type of PLAIN. This parameter defaults to 1024 * 1024 bytes (1 MiB),
*         the maximum size of a dictionary page before it reverts to PLAIN encoding. This size is used for .parquet
*         file format only.
*/
public Integer getDictPageSizeLimit() {
return this.dictPageSizeLimit;
}
/**
*
* The maximum size of an encoded dictionary page of a column. If the dictionary page exceeds this, this column is
* stored using an encoding type of PLAIN. This parameter defaults to 1024 * 1024 bytes (1 MiB), the maximum size of
* a dictionary page before it reverts to PLAIN encoding. This size is used for .parquet file format only.
*
* @param dictPageSizeLimit
*        The maximum size of an encoded dictionary page of a column. If the dictionary page exceeds this, this
*        column is stored using an encoding type of PLAIN. This parameter defaults to 1024 * 1024 bytes (1 MiB),
*        the maximum size of a dictionary page before it reverts to PLAIN encoding. This size is used for .parquet
*        file format only.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public S3Settings withDictPageSizeLimit(Integer dictPageSizeLimit) {
setDictPageSizeLimit(dictPageSizeLimit);
return this;
}
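/**
* Illustrative usage sketch only, not part of the generated SDK source: the limit is expressed in bytes, so the
* 1 MiB default corresponds to 1024 * 1024 and the value below simply doubles it.
*/
private static S3Settings dictPageSizeLimitSketch() {
    // Give each column a 2 MiB dictionary-page budget before it falls back to PLAIN encoding.
    return new S3Settings().withDictPageSizeLimit(2 * 1024 * 1024);
}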
/**
*
* The number of rows in a row group. A smaller row group size provides faster reads, but as the number of row
* groups grows, writes become slower. This parameter defaults to 10,000 rows. This number is used for .parquet file
* format only.
*
* If you choose a value larger than the maximum, RowGroupLength is set to the max row group length in bytes
* (64 * 1024 * 1024).
*
* @param rowGroupLength
*        The number of rows in a row group. A smaller row group size provides faster reads, but as the number of
*        row groups grows, writes become slower. This parameter defaults to 10,000 rows. This number is used for
*        .parquet file format only.
*
*        If you choose a value larger than the maximum, RowGroupLength is set to the max row group length in bytes
*        (64 * 1024 * 1024).
*/
public void setRowGroupLength(Integer rowGroupLength) {
this.rowGroupLength = rowGroupLength;
}
/**
*
* The number of rows in a row group. A smaller row group size provides faster reads, but as the number of row
* groups grows, writes become slower. This parameter defaults to 10,000 rows. This number is used for .parquet file
* format only.
*
* If you choose a value larger than the maximum, RowGroupLength is set to the max row group length in bytes
* (64 * 1024 * 1024).
*
* @return The number of rows in a row group. A smaller row group size provides faster reads, but as the number of
*         row groups grows, writes become slower. This parameter defaults to 10,000 rows. This number is used for
*         .parquet file format only.
*
*         If you choose a value larger than the maximum, RowGroupLength is set to the max row group length in bytes
*         (64 * 1024 * 1024).
*/
public Integer getRowGroupLength() {
return this.rowGroupLength;
}
/**
*
* The number of rows in a row group. A smaller row group size provides faster reads, but as the number of row
* groups grows, writes become slower. This parameter defaults to 10,000 rows. This number is used for .parquet file
* format only.
*
* If you choose a value larger than the maximum, RowGroupLength is set to the max row group length in bytes
* (64 * 1024 * 1024).
*
* @param rowGroupLength
*        The number of rows in a row group. A smaller row group size provides faster reads, but as the number of
*        row groups grows, writes become slower. This parameter defaults to 10,000 rows. This number is used for
*        .parquet file format only.
*
*        If you choose a value larger than the maximum, RowGroupLength is set to the max row group length in bytes
*        (64 * 1024 * 1024).
* @return Returns a reference to this object so that method calls can be chained together.
*/
public S3Settings withRowGroupLength(Integer rowGroupLength) {
setRowGroupLength(rowGroupLength);
return this;
}
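/**
* Illustrative usage sketch only, not part of the generated SDK source: RowGroupLength counts rows, not bytes, so
* the value below simply doubles the documented 10,000-row default.
*/
private static S3Settings rowGroupLengthSketch() {
    // Larger row groups mean fewer groups per file: typically faster scans at the cost of slower writes.
    return new S3Settings().withRowGroupLength(20000);
}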
/**
*
* The size of one data page in bytes. This parameter defaults to 1024 * 1024 bytes (1 MiB). This number is used for
* .parquet file format only.
*
*
* @param dataPageSize
* The size of one data page in bytes. This parameter defaults to 1024 * 1024 bytes (1 MiB). This number is
* used for .parquet file format only.
*/
public void setDataPageSize(Integer dataPageSize) {
this.dataPageSize = dataPageSize;
}
/**
*
* The size of one data page in bytes. This parameter defaults to 1024 * 1024 bytes (1 MiB). This number is used for
* .parquet file format only.
*
*
* @return The size of one data page in bytes. This parameter defaults to 1024 * 1024 bytes (1 MiB). This number is
* used for .parquet file format only.
*/
public Integer getDataPageSize() {
return this.dataPageSize;
}
/**
*
* The size of one data page in bytes. This parameter defaults to 1024 * 1024 bytes (1 MiB). This number is used for
* .parquet file format only.
*
*
* @param dataPageSize
* The size of one data page in bytes. This parameter defaults to 1024 * 1024 bytes (1 MiB). This number is
* used for .parquet file format only.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public S3Settings withDataPageSize(Integer dataPageSize) {
setDataPageSize(dataPageSize);
return this;
}
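/**
* Illustrative usage sketch only, not part of the generated SDK source: DataPageSize is also expressed in bytes,
* so the documented 1 MiB default is written out as 1024 * 1024 here.
*/
private static S3Settings dataPageSizeSketch() {
    // Keep the default data-page size explicit for readers of the endpoint configuration.
    return new S3Settings().withDataPageSize(1024 * 1024);
}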
/**
*
* The version of the Apache Parquet format that you want to use: parquet_1_0 (the default) or parquet_2_0.
*
* @param parquetVersion
*        The version of the Apache Parquet format that you want to use: parquet_1_0 (the default) or parquet_2_0.
* @see ParquetVersionValue
*/
public void setParquetVersion(String parquetVersion) {
this.parquetVersion = parquetVersion;
}
/**
*
* The version of the Apache Parquet format that you want to use: parquet_1_0 (the default) or parquet_2_0.
*
* @return The version of the Apache Parquet format that you want to use: parquet_1_0 (the default) or parquet_2_0.
* @see ParquetVersionValue
*/
public String getParquetVersion() {
return this.parquetVersion;
}
/**
*
* The version of the Apache Parquet format that you want to use: parquet_1_0 (the default) or parquet_2_0.
*
* @param parquetVersion
*        The version of the Apache Parquet format that you want to use: parquet_1_0 (the default) or parquet_2_0.
* @return Returns a reference to this object so that method calls can be chained together.
* @see ParquetVersionValue
*/
public S3Settings withParquetVersion(String parquetVersion) {
setParquetVersion(parquetVersion);
return this;
}
/**
*
* The version of the Apache Parquet format that you want to use: parquet_1_0 (the default) or parquet_2_0.
*
* @param parquetVersion
*        The version of the Apache Parquet format that you want to use: parquet_1_0 (the default) or parquet_2_0.
* @see ParquetVersionValue
*/
public void setParquetVersion(ParquetVersionValue parquetVersion) {
withParquetVersion(parquetVersion);
}
/**
*
* The version of the Apache Parquet format that you want to use: parquet_1_0 (the default) or parquet_2_0.
*
* @param parquetVersion
*        The version of the Apache Parquet format that you want to use: parquet_1_0 (the default) or parquet_2_0.
* @return Returns a reference to this object so that method calls can be chained together.
* @see ParquetVersionValue
*/
public S3Settings withParquetVersion(ParquetVersionValue parquetVersion) {
this.parquetVersion = parquetVersion.toString();
return this;
}
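/**
* Illustrative usage sketch only, not part of the generated SDK source: assumes the ParquetVersionValue enum
* exposes PARQUET_1_0 and PARQUET_2_0 constants matching the values described above.
*/
private static S3Settings parquetVersionSketch() {
    // parquet_1_0 is the default; opt in to parquet_2_0 explicitly only when downstream readers support it.
    return new S3Settings().withParquetVersion(ParquetVersionValue.PARQUET_2_0);
}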
/**
*
* A value that enables statistics for Parquet pages and row groups. Choose true to enable statistics, false to
* disable. Statistics include NULL, DISTINCT, MAX, and MIN values. This parameter defaults to true. This value is
* used for .parquet file format only.
*
* @param enableStatistics
*        A value that enables statistics for Parquet pages and row groups. Choose true to enable statistics, false
*        to disable. Statistics include NULL, DISTINCT, MAX, and MIN values. This parameter defaults to true. This
*        value is used for .parquet file format only.
*/
public void setEnableStatistics(Boolean enableStatistics) {
this.enableStatistics = enableStatistics;
}
/**
*
* A value that enables statistics for Parquet pages and row groups. Choose true to enable statistics, false to
* disable. Statistics include NULL, DISTINCT, MAX, and MIN values. This parameter defaults to true. This value is
* used for .parquet file format only.
*
* @return A value that enables statistics for Parquet pages and row groups. Choose true to enable statistics, false
*         to disable. Statistics include NULL, DISTINCT, MAX, and MIN values. This parameter defaults to true. This
*         value is used for .parquet file format only.
*/
public Boolean getEnableStatistics() {
return this.enableStatistics;
}
/**
*
* A value that enables statistics for Parquet pages and row groups. Choose true to enable statistics, false to
* disable. Statistics include NULL, DISTINCT, MAX, and MIN values. This parameter defaults to true. This value is
* used for .parquet file format only.
*
* @param enableStatistics
*        A value that enables statistics for Parquet pages and row groups. Choose true to enable statistics, false
*        to disable. Statistics include NULL, DISTINCT, MAX, and MIN values. This parameter defaults to true. This
*        value is used for .parquet file format only.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public S3Settings withEnableStatistics(Boolean enableStatistics) {
setEnableStatistics(enableStatistics);
return this;
}
/**
*
* A value that enables statistics for Parquet pages and row groups. Choose true to enable statistics, false to
* disable. Statistics include NULL, DISTINCT, MAX, and MIN values. This parameter defaults to true. This value is
* used for .parquet file format only.
*
* @return A value that enables statistics for Parquet pages and row groups. Choose true to enable statistics, false
*         to disable. Statistics include NULL, DISTINCT, MAX, and MIN values. This parameter defaults to true. This
*         value is used for .parquet file format only.
*/
public Boolean isEnableStatistics() {
return this.enableStatistics;
}
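/**
* Illustrative usage sketch only, not part of the generated SDK source: statistics default to true, so this shows
* how to turn them off for Parquet output.
*/
private static S3Settings enableStatisticsSketch() {
    // Disabling NULL/DISTINCT/MAX/MIN statistics trades query-planning hints for slightly smaller files.
    return new S3Settings().withEnableStatistics(Boolean.FALSE);
}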
/**
*
* A value that enables a full load to write INSERT operations to the comma-separated value (.csv) or .parquet
* output files only to indicate how the rows were added to the source database.
*
* DMS supports the IncludeOpForFullLoad parameter in versions 3.1.4 and later.
*
* DMS supports the use of the .parquet files with the IncludeOpForFullLoad parameter in versions 3.4.7 and later.
*
* For full load, records can only be inserted. By default (the false setting), no information is recorded in these
* output files for a full load to indicate that the rows were inserted at the source database. If
* IncludeOpForFullLoad is set to true or y, the INSERT is recorded as an I annotation in the first field of the
* .csv file. This allows the format of your target records from a full load to be consistent with the target
* records from a CDC load.
*
* This setting works together with the CdcInsertsOnly and the CdcInsertsAndUpdates parameters for output to .csv
* files only. For more information about how these settings work together, see Indicating Source DB Operations in
* Migrated S3 Data in the Database Migration Service User Guide.
*
* @param includeOpForFullLoad
*        A value that enables a full load to write INSERT operations to the comma-separated value (.csv) or
*        .parquet output files only to indicate how the rows were added to the source database.
*
*        DMS supports the IncludeOpForFullLoad parameter in versions 3.1.4 and later.
*
*        DMS supports the use of the .parquet files with the IncludeOpForFullLoad parameter in versions 3.4.7 and
*        later.
*
*        For full load, records can only be inserted. By default (the false setting), no information is recorded
*        in these output files for a full load to indicate that the rows were inserted at the source database. If
*        IncludeOpForFullLoad is set to true or y, the INSERT is recorded as an I annotation in the first field of
*        the .csv file. This allows the format of your target records from a full load to be consistent with the
*        target records from a CDC load.
*
*        This setting works together with the CdcInsertsOnly and the CdcInsertsAndUpdates parameters for output to
*        .csv files only. For more information about how these settings work together, see Indicating Source DB
*        Operations in Migrated S3 Data in the Database Migration Service User Guide.
*/
public void setIncludeOpForFullLoad(Boolean includeOpForFullLoad) {
this.includeOpForFullLoad = includeOpForFullLoad;
}
/**
*
* A value that enables a full load to write INSERT operations to the comma-separated value (.csv) or .parquet
* output files only to indicate how the rows were added to the source database.
*
* DMS supports the IncludeOpForFullLoad parameter in versions 3.1.4 and later.
*
* DMS supports the use of the .parquet files with the IncludeOpForFullLoad parameter in versions 3.4.7 and later.
*
* For full load, records can only be inserted. By default (the false setting), no information is recorded in these
* output files for a full load to indicate that the rows were inserted at the source database. If
* IncludeOpForFullLoad is set to true or y, the INSERT is recorded as an I annotation in the first field of the
* .csv file. This allows the format of your target records from a full load to be consistent with the target
* records from a CDC load.
*
* This setting works together with the CdcInsertsOnly and the CdcInsertsAndUpdates parameters for output to .csv
* files only. For more information about how these settings work together, see Indicating Source DB Operations in
* Migrated S3 Data in the Database Migration Service User Guide.
*
* @return A value that enables a full load to write INSERT operations to the comma-separated value (.csv) or
*         .parquet output files only to indicate how the rows were added to the source database.
*
*         DMS supports the IncludeOpForFullLoad parameter in versions 3.1.4 and later.
*
*         DMS supports the use of the .parquet files with the IncludeOpForFullLoad parameter in versions 3.4.7 and
*         later.
*
*         For full load, records can only be inserted. By default (the false setting), no information is recorded
*         in these output files for a full load to indicate that the rows were inserted at the source database. If
*         IncludeOpForFullLoad is set to true or y, the INSERT is recorded as an I annotation in the first field of
*         the .csv file. This allows the format of your target records from a full load to be consistent with the
*         target records from a CDC load.
*
*         This setting works together with the CdcInsertsOnly and the CdcInsertsAndUpdates parameters for output to
*         .csv files only. For more information about how these settings work together, see Indicating Source DB
*         Operations in Migrated S3 Data in the Database Migration Service User Guide.
*/
public Boolean getIncludeOpForFullLoad() {
return this.includeOpForFullLoad;
}
/**
*
* A value that enables a full load to write INSERT operations to the comma-separated value (.csv) or .parquet
* output files only to indicate how the rows were added to the source database.
*
* DMS supports the IncludeOpForFullLoad parameter in versions 3.1.4 and later.
*
* DMS supports the use of the .parquet files with the IncludeOpForFullLoad parameter in versions 3.4.7 and later.
*
* For full load, records can only be inserted. By default (the false setting), no information is recorded in these
* output files for a full load to indicate that the rows were inserted at the source database. If
* IncludeOpForFullLoad is set to true or y, the INSERT is recorded as an I annotation in the first field of the
* .csv file. This allows the format of your target records from a full load to be consistent with the target
* records from a CDC load.
*
* This setting works together with the CdcInsertsOnly and the CdcInsertsAndUpdates parameters for output to .csv
* files only. For more information about how these settings work together, see Indicating Source DB Operations in
* Migrated S3 Data in the Database Migration Service User Guide.
*
* @param includeOpForFullLoad
*        A value that enables a full load to write INSERT operations to the comma-separated value (.csv) or
*        .parquet output files only to indicate how the rows were added to the source database.
*
*        DMS supports the IncludeOpForFullLoad parameter in versions 3.1.4 and later.
*
*        DMS supports the use of the .parquet files with the IncludeOpForFullLoad parameter in versions 3.4.7 and
*        later.
*
*        For full load, records can only be inserted. By default (the false setting), no information is recorded
*        in these output files for a full load to indicate that the rows were inserted at the source database. If
*        IncludeOpForFullLoad is set to true or y, the INSERT is recorded as an I annotation in the first field of
*        the .csv file. This allows the format of your target records from a full load to be consistent with the
*        target records from a CDC load.
*
*        This setting works together with the CdcInsertsOnly and the CdcInsertsAndUpdates parameters for output to
*        .csv files only. For more information about how these settings work together, see Indicating Source DB
*        Operations in Migrated S3 Data in the Database Migration Service User Guide.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public S3Settings withIncludeOpForFullLoad(Boolean includeOpForFullLoad) {
setIncludeOpForFullLoad(includeOpForFullLoad);
return this;
}
/**
*
* A value that enables a full load to write INSERT operations to the comma-separated value (.csv) or .parquet
* output files only to indicate how the rows were added to the source database.
*
* DMS supports the IncludeOpForFullLoad parameter in versions 3.1.4 and later.
*
* DMS supports the use of the .parquet files with the IncludeOpForFullLoad parameter in versions 3.4.7 and later.
*
* For full load, records can only be inserted. By default (the false setting), no information is recorded in these
* output files for a full load to indicate that the rows were inserted at the source database. If
* IncludeOpForFullLoad is set to true or y, the INSERT is recorded as an I annotation in the first field of the
* .csv file. This allows the format of your target records from a full load to be consistent with the target
* records from a CDC load.
*
* This setting works together with the CdcInsertsOnly and the CdcInsertsAndUpdates parameters for output to .csv
* files only. For more information about how these settings work together, see Indicating Source DB Operations in
* Migrated S3 Data in the Database Migration Service User Guide.
*
* @return A value that enables a full load to write INSERT operations to the comma-separated value (.csv) or
*         .parquet output files only to indicate how the rows were added to the source database.
*
*         DMS supports the IncludeOpForFullLoad parameter in versions 3.1.4 and later.
*
*         DMS supports the use of the .parquet files with the IncludeOpForFullLoad parameter in versions 3.4.7 and
*         later.
*
*         For full load, records can only be inserted. By default (the false setting), no information is recorded
*         in these output files for a full load to indicate that the rows were inserted at the source database. If
*         IncludeOpForFullLoad is set to true or y, the INSERT is recorded as an I annotation in the first field of
*         the .csv file. This allows the format of your target records from a full load to be consistent with the
*         target records from a CDC load.
*
*         This setting works together with the CdcInsertsOnly and the CdcInsertsAndUpdates parameters for output to
*         .csv files only. For more information about how these settings work together, see Indicating Source DB
*         Operations in Migrated S3 Data in the Database Migration Service User Guide.
*/
public Boolean isIncludeOpForFullLoad() {
return this.includeOpForFullLoad;
}
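/**
* Illustrative usage sketch only, not part of the generated SDK source: annotates full-load rows with the I
* operation marker so full-load and CDC records share the same leading-field layout, as described above.
*/
private static S3Settings includeOpForFullLoadSketch() {
    return new S3Settings().withIncludeOpForFullLoad(Boolean.TRUE);
}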
/**
*
* A value that enables a change data capture (CDC) load to write only INSERT operations to .csv or columnar
* storage (.parquet) output files. By default (the false setting), the first field in a .csv or .parquet record
* contains the letter I (INSERT), U (UPDATE), or D (DELETE). These values indicate whether the row was inserted,
* updated, or deleted at the source database for a CDC load to the target.
*
* If CdcInsertsOnly is set to true or y, only INSERTs from the source database are migrated to the .csv or
* .parquet file. For .csv format only, how these INSERTs are recorded depends on the value of IncludeOpForFullLoad.
* If IncludeOpForFullLoad is set to true, the first field of every CDC record is set to I to indicate the INSERT
* operation at the source. If IncludeOpForFullLoad is set to false, every CDC record is written without a first
* field to indicate the INSERT operation at the source. For more information about how these settings work
* together, see Indicating Source DB Operations in Migrated S3 Data in the Database Migration Service User Guide.
*
* DMS supports the interaction described preceding between the CdcInsertsOnly and IncludeOpForFullLoad parameters
* in versions 3.1.4 and later.
*
* CdcInsertsOnly and CdcInsertsAndUpdates can't both be set to true for the same endpoint. Set either
* CdcInsertsOnly or CdcInsertsAndUpdates to true for the same endpoint, but not both.
*
* @param cdcInsertsOnly
*        A value that enables a change data capture (CDC) load to write only INSERT operations to .csv or columnar
*        storage (.parquet) output files. By default (the false setting), the first field in a .csv or .parquet
*        record contains the letter I (INSERT), U (UPDATE), or D (DELETE). These values indicate whether the row
*        was inserted, updated, or deleted at the source database for a CDC load to the target.
*
*        If CdcInsertsOnly is set to true or y, only INSERTs from the source database are migrated to the .csv or
*        .parquet file. For .csv format only, how these INSERTs are recorded depends on the value of
*        IncludeOpForFullLoad. If IncludeOpForFullLoad is set to true, the first field of every CDC record is set
*        to I to indicate the INSERT operation at the source. If IncludeOpForFullLoad is set to false, every CDC
*        record is written without a first field to indicate the INSERT operation at the source. For more
*        information about how these settings work together, see Indicating Source DB Operations in Migrated S3
*        Data in the Database Migration Service User Guide.
*
*        DMS supports the interaction described preceding between the CdcInsertsOnly and IncludeOpForFullLoad
*        parameters in versions 3.1.4 and later.
*
*        CdcInsertsOnly and CdcInsertsAndUpdates can't both be set to true for the same endpoint. Set either
*        CdcInsertsOnly or CdcInsertsAndUpdates to true for the same endpoint, but not both.
*/
public void setCdcInsertsOnly(Boolean cdcInsertsOnly) {
this.cdcInsertsOnly = cdcInsertsOnly;
}
/**
*
* A value that enables a change data capture (CDC) load to write only INSERT operations to .csv or columnar
* storage (.parquet) output files. By default (the false setting), the first field in a .csv or .parquet record
* contains the letter I (INSERT), U (UPDATE), or D (DELETE). These values indicate whether the row was inserted,
* updated, or deleted at the source database for a CDC load to the target.
*
* If CdcInsertsOnly is set to true or y, only INSERTs from the source database are migrated to the .csv or
* .parquet file. For .csv format only, how these INSERTs are recorded depends on the value of IncludeOpForFullLoad.
* If IncludeOpForFullLoad is set to true, the first field of every CDC record is set to I to indicate the INSERT
* operation at the source. If IncludeOpForFullLoad is set to false, every CDC record is written without a first
* field to indicate the INSERT operation at the source. For more information about how these settings work
* together, see Indicating Source DB Operations in Migrated S3 Data in the Database Migration Service User Guide.
*
* DMS supports the interaction described preceding between the CdcInsertsOnly and IncludeOpForFullLoad parameters
* in versions 3.1.4 and later.
*
* CdcInsertsOnly and CdcInsertsAndUpdates can't both be set to true for the same endpoint. Set either
* CdcInsertsOnly or CdcInsertsAndUpdates to true for the same endpoint, but not both.
*
* @return A value that enables a change data capture (CDC) load to write only INSERT operations to .csv or
*         columnar storage (.parquet) output files. By default (the false setting), the first field in a .csv or
*         .parquet record contains the letter I (INSERT), U (UPDATE), or D (DELETE). These values indicate whether
*         the row was inserted, updated, or deleted at the source database for a CDC load to the target.
*
*         If CdcInsertsOnly is set to true or y, only INSERTs from the source database are migrated to the .csv or
*         .parquet file. For .csv format only, how these INSERTs are recorded depends on the value of
*         IncludeOpForFullLoad. If IncludeOpForFullLoad is set to true, the first field of every CDC record is set
*         to I to indicate the INSERT operation at the source. If IncludeOpForFullLoad is set to false, every CDC
*         record is written without a first field to indicate the INSERT operation at the source. For more
*         information about how these settings work together, see Indicating Source DB Operations in Migrated S3
*         Data in the Database Migration Service User Guide.
*
*         DMS supports the interaction described preceding between the CdcInsertsOnly and IncludeOpForFullLoad
*         parameters in versions 3.1.4 and later.
*
*         CdcInsertsOnly and CdcInsertsAndUpdates can't both be set to true for the same endpoint. Set either
*         CdcInsertsOnly or CdcInsertsAndUpdates to true for the same endpoint, but not both.
*/
public Boolean getCdcInsertsOnly() {
return this.cdcInsertsOnly;
}
/**
*
* A value that enables a change data capture (CDC) load to write only INSERT operations to .csv or columnar
* storage (.parquet) output files. By default (the false setting), the first field in a .csv or .parquet record
* contains the letter I (INSERT), U (UPDATE), or D (DELETE). These values indicate whether the row was inserted,
* updated, or deleted at the source database for a CDC load to the target.
*
* If CdcInsertsOnly is set to true or y, only INSERTs from the source database are migrated to the .csv or
* .parquet file. For .csv format only, how these INSERTs are recorded depends on the value of IncludeOpForFullLoad.
* If IncludeOpForFullLoad is set to true, the first field of every CDC record is set to I to indicate the INSERT
* operation at the source. If IncludeOpForFullLoad is set to false, every CDC record is written without a first
* field to indicate the INSERT operation at the source. For more information about how these settings work
* together, see Indicating Source DB Operations in Migrated S3 Data in the Database Migration Service User Guide.
*
* DMS supports the interaction described preceding between the CdcInsertsOnly and IncludeOpForFullLoad parameters
* in versions 3.1.4 and later.
*
* CdcInsertsOnly and CdcInsertsAndUpdates can't both be set to true for the same endpoint. Set either
* CdcInsertsOnly or CdcInsertsAndUpdates to true for the same endpoint, but not both.
*
* @param cdcInsertsOnly
*        A value that enables a change data capture (CDC) load to write only INSERT operations to .csv or columnar
*        storage (.parquet) output files. By default (the false setting), the first field in a .csv or .parquet
*        record contains the letter I (INSERT), U (UPDATE), or D (DELETE). These values indicate whether the row
*        was inserted, updated, or deleted at the source database for a CDC load to the target.
*
*        If CdcInsertsOnly is set to true or y, only INSERTs from the source database are migrated to the .csv or
*        .parquet file. For .csv format only, how these INSERTs are recorded depends on the value of
*        IncludeOpForFullLoad. If IncludeOpForFullLoad is set to true, the first field of every CDC record is set
*        to I to indicate the INSERT operation at the source. If IncludeOpForFullLoad is set to false, every CDC
*        record is written without a first field to indicate the INSERT operation at the source. For more
*        information about how these settings work together, see Indicating Source DB Operations in Migrated S3
*        Data in the Database Migration Service User Guide.
*
*        DMS supports the interaction described preceding between the CdcInsertsOnly and IncludeOpForFullLoad
*        parameters in versions 3.1.4 and later.
*
*        CdcInsertsOnly and CdcInsertsAndUpdates can't both be set to true for the same endpoint. Set either
*        CdcInsertsOnly or CdcInsertsAndUpdates to true for the same endpoint, but not both.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public S3Settings withCdcInsertsOnly(Boolean cdcInsertsOnly) {
setCdcInsertsOnly(cdcInsertsOnly);
return this;
}
/**
*
* A value that enables a change data capture (CDC) load to write only INSERT operations to .csv or columnar
* storage (.parquet) output files. By default (the false setting), the first field in a .csv or .parquet record
* contains the letter I (INSERT), U (UPDATE), or D (DELETE). These values indicate whether the row was inserted,
* updated, or deleted at the source database for a CDC load to the target.
*
* If CdcInsertsOnly is set to true or y, only INSERTs from the source database are migrated to the .csv or
* .parquet file. For .csv format only, how these INSERTs are recorded depends on the value of IncludeOpForFullLoad.
* If IncludeOpForFullLoad is set to true, the first field of every CDC record is set to I to indicate the INSERT
* operation at the source. If IncludeOpForFullLoad is set to false, every CDC record is written without a first
* field to indicate the INSERT operation at the source. For more information about how these settings work
* together, see Indicating Source DB Operations in Migrated S3 Data in the Database Migration Service User Guide.
*
* DMS supports the interaction described preceding between the CdcInsertsOnly and IncludeOpForFullLoad parameters
* in versions 3.1.4 and later.
*
* CdcInsertsOnly and CdcInsertsAndUpdates can't both be set to true for the same endpoint. Set either
* CdcInsertsOnly or CdcInsertsAndUpdates to true for the same endpoint, but not both.
*
* @return A value that enables a change data capture (CDC) load to write only INSERT operations to .csv or
*         columnar storage (.parquet) output files. By default (the false setting), the first field in a .csv or
*         .parquet record contains the letter I (INSERT), U (UPDATE), or D (DELETE). These values indicate whether
*         the row was inserted, updated, or deleted at the source database for a CDC load to the target.
*
*         If CdcInsertsOnly is set to true or y, only INSERTs from the source database are migrated to the .csv or
*         .parquet file. For .csv format only, how these INSERTs are recorded depends on the value of
*         IncludeOpForFullLoad. If IncludeOpForFullLoad is set to true, the first field of every CDC record is set
*         to I to indicate the INSERT operation at the source. If IncludeOpForFullLoad is set to false, every CDC
*         record is written without a first field to indicate the INSERT operation at the source. For more
*         information about how these settings work together, see Indicating Source DB Operations in Migrated S3
*         Data in the Database Migration Service User Guide.
*
*         DMS supports the interaction described preceding between the CdcInsertsOnly and IncludeOpForFullLoad
*         parameters in versions 3.1.4 and later.
*
*         CdcInsertsOnly and CdcInsertsAndUpdates can't both be set to true for the same endpoint. Set either
*         CdcInsertsOnly or CdcInsertsAndUpdates to true for the same endpoint, but not both.
*/
public Boolean isCdcInsertsOnly() {
return this.cdcInsertsOnly;
}
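/**
* Illustrative usage sketch only, not part of the generated SDK source: migrates only INSERTs during CDC and keeps
* the leading operation field, which together yield I-annotated records as described above. CdcInsertsAndUpdates
* is deliberately left unset because the two flags can't both be true for the same endpoint.
*/
private static S3Settings cdcInsertsOnlySketch() {
    return new S3Settings().withIncludeOpForFullLoad(Boolean.TRUE).withCdcInsertsOnly(Boolean.TRUE);
}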
/**
*
* A value that when nonblank causes DMS to add a column with timestamp information to the endpoint data for an
* Amazon S3 target.
*
* DMS supports the TimestampColumnName parameter in versions 3.1.4 and later.
*
* DMS includes an additional STRING column in the .csv or .parquet object files of your migrated data when you set
* TimestampColumnName to a nonblank value.
*
* For a full load, each row of this timestamp column contains a timestamp for when the data was transferred from
* the source to the target by DMS.
*
* For a change data capture (CDC) load, each row of the timestamp column contains the timestamp for the commit of
* that row in the source database.
*
* The string format for this timestamp column value is yyyy-MM-dd HH:mm:ss.SSSSSS. By default, the precision of
* this value is in microseconds. For a CDC load, the rounding of the precision depends on the commit timestamp
* supported by DMS for the source database.
*
* When the AddColumnName parameter is set to true, DMS also includes a name for the timestamp column that you set
* with TimestampColumnName.
*
* @param timestampColumnName
*        A value that when nonblank causes DMS to add a column with timestamp information to the endpoint data for
*        an Amazon S3 target.
*
*        DMS supports the TimestampColumnName parameter in versions 3.1.4 and later.
*
*        DMS includes an additional STRING column in the .csv or .parquet object files of your migrated data when
*        you set TimestampColumnName to a nonblank value.
*
*        For a full load, each row of this timestamp column contains a timestamp for when the data was transferred
*        from the source to the target by DMS.
*
*        For a change data capture (CDC) load, each row of the timestamp column contains the timestamp for the
*        commit of that row in the source database.
*
*        The string format for this timestamp column value is yyyy-MM-dd HH:mm:ss.SSSSSS. By default, the
*        precision of this value is in microseconds. For a CDC load, the rounding of the precision depends on the
*        commit timestamp supported by DMS for the source database.
*
*        When the AddColumnName parameter is set to true, DMS also includes a name for the timestamp column that
*        you set with TimestampColumnName.
*/
public void setTimestampColumnName(String timestampColumnName) {
this.timestampColumnName = timestampColumnName;
}
/**
*
* A value that when nonblank causes DMS to add a column with timestamp information to the endpoint data for an
* Amazon S3 target.
*
* DMS supports the TimestampColumnName parameter in versions 3.1.4 and later.
*
* DMS includes an additional STRING column in the .csv or .parquet object files of your migrated data when you set
* TimestampColumnName to a nonblank value.
*
* For a full load, each row of this timestamp column contains a timestamp for when the data was transferred from
* the source to the target by DMS.
*
* For a change data capture (CDC) load, each row of the timestamp column contains the timestamp for the commit of
* that row in the source database.
*
* The string format for this timestamp column value is yyyy-MM-dd HH:mm:ss.SSSSSS. By default, the precision of
* this value is in microseconds. For a CDC load, the rounding of the precision depends on the commit timestamp
* supported by DMS for the source database.
*
* When the AddColumnName parameter is set to true, DMS also includes a name for the timestamp column that you set
* with TimestampColumnName.
*
* @return A value that when nonblank causes DMS to add a column with timestamp information to the endpoint data
*         for an Amazon S3 target.
*
*         DMS supports the TimestampColumnName parameter in versions 3.1.4 and later.
*
*         DMS includes an additional STRING column in the .csv or .parquet object files of your migrated data when
*         you set TimestampColumnName to a nonblank value.
*
*         For a full load, each row of this timestamp column contains a timestamp for when the data was
*         transferred from the source to the target by DMS.
*
*         For a change data capture (CDC) load, each row of the timestamp column contains the timestamp for the
*         commit of that row in the source database.
*
*         The string format for this timestamp column value is yyyy-MM-dd HH:mm:ss.SSSSSS. By default, the
*         precision of this value is in microseconds. For a CDC load, the rounding of the precision depends on the
*         commit timestamp supported by DMS for the source database.
*
*         When the AddColumnName parameter is set to true, DMS also includes a name for the timestamp column that
*         you set with TimestampColumnName.
*/
public String getTimestampColumnName() {
return this.timestampColumnName;
}
/**
*
* A value that when nonblank causes DMS to add a column with timestamp information to the endpoint data for an
* Amazon S3 target.
*
* DMS supports the TimestampColumnName parameter in versions 3.1.4 and later.
*
* DMS includes an additional STRING column in the .csv or .parquet object files of your migrated data when you set
* TimestampColumnName to a nonblank value.
*
* For a full load, each row of this timestamp column contains a timestamp for when the data was transferred from
* the source to the target by DMS.
*
* For a change data capture (CDC) load, each row of the timestamp column contains the timestamp for the commit of
* that row in the source database.
*
* The string format for this timestamp column value is yyyy-MM-dd HH:mm:ss.SSSSSS. By default, the precision of
* this value is in microseconds. For a CDC load, the rounding of the precision depends on the commit timestamp
* supported by DMS for the source database.
*
* When the AddColumnName parameter is set to true, DMS also includes a name for the timestamp column that you set
* with TimestampColumnName.
*
* @param timestampColumnName
*        A value that when nonblank causes DMS to add a column with timestamp information to the endpoint data for
*        an Amazon S3 target.
*
*        DMS supports the TimestampColumnName parameter in versions 3.1.4 and later.
*
*        DMS includes an additional STRING column in the .csv or .parquet object files of your migrated data when
*        you set TimestampColumnName to a nonblank value.
*
*        For a full load, each row of this timestamp column contains a timestamp for when the data was transferred
*        from the source to the target by DMS.
*
*        For a change data capture (CDC) load, each row of the timestamp column contains the timestamp for the
*        commit of that row in the source database.
*
*        The string format for this timestamp column value is yyyy-MM-dd HH:mm:ss.SSSSSS. By default, the
*        precision of this value is in microseconds. For a CDC load, the rounding of the precision depends on the
*        commit timestamp supported by DMS for the source database.
*
*        When the AddColumnName parameter is set to true, DMS also includes a name for the timestamp column that
*        you set with TimestampColumnName.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public S3Settings withTimestampColumnName(String timestampColumnName) {
setTimestampColumnName(timestampColumnName);
return this;
}
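/**
* Illustrative usage sketch only, not part of the generated SDK source: the column name used here is a
* hypothetical example, not a value required by the service.
*/
private static S3Settings timestampColumnNameSketch() {
    // A nonblank value adds a STRING column in yyyy-MM-dd HH:mm:ss.SSSSSS format to every migrated row.
    return new S3Settings().withTimestampColumnName("dms_commit_ts");
}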
/**
*
* A value that specifies the precision of any TIMESTAMP column values that are written to an Amazon S3 object file
* in .parquet format.
*
* DMS supports the ParquetTimestampInMillisecond parameter in versions 3.1.4 and later.
*
* When ParquetTimestampInMillisecond is set to true or y, DMS writes all TIMESTAMP columns in a .parquet formatted
* file with millisecond precision. Otherwise, DMS writes them with microsecond precision.
*
* Currently, Amazon Athena and Glue can handle only millisecond precision for TIMESTAMP values. Set this parameter
* to true for S3 endpoint object files that are .parquet formatted only if you plan to query or process the data
* with Athena or Glue.
*
* DMS writes any TIMESTAMP column values written to an S3 file in .csv format with microsecond precision.
*
* Setting ParquetTimestampInMillisecond has no effect on the string format of the timestamp column value that is
* inserted by setting the TimestampColumnName parameter.
*
* @param parquetTimestampInMillisecond
*        A value that specifies the precision of any TIMESTAMP column values that are written to an Amazon S3
*        object file in .parquet format.
*
*        DMS supports the ParquetTimestampInMillisecond parameter in versions 3.1.4 and later.
*
*        When ParquetTimestampInMillisecond is set to true or y, DMS writes all TIMESTAMP columns in a .parquet
*        formatted file with millisecond precision. Otherwise, DMS writes them with microsecond precision.
*
*        Currently, Amazon Athena and Glue can handle only millisecond precision for TIMESTAMP values. Set this
*        parameter to true for S3 endpoint object files that are .parquet formatted only if you plan to query or
*        process the data with Athena or Glue.
*
*        DMS writes any TIMESTAMP column values written to an S3 file in .csv format with microsecond precision.
*
*        Setting ParquetTimestampInMillisecond has no effect on the string format of the timestamp column value
*        that is inserted by setting the TimestampColumnName parameter.
*/
public void setParquetTimestampInMillisecond(Boolean parquetTimestampInMillisecond) {
this.parquetTimestampInMillisecond = parquetTimestampInMillisecond;
}
/**
*
* A value that specifies the precision of any TIMESTAMP column values that are written to an Amazon S3 object file
* in .parquet format.
*
* DMS supports the ParquetTimestampInMillisecond parameter in versions 3.1.4 and later.
*
* When ParquetTimestampInMillisecond is set to true or y, DMS writes all TIMESTAMP columns in a .parquet formatted
* file with millisecond precision. Otherwise, DMS writes them with microsecond precision.
*
* Currently, Amazon Athena and Glue can handle only millisecond precision for TIMESTAMP values. Set this parameter
* to true for S3 endpoint object files that are .parquet formatted only if you plan to query or process the data
* with Athena or Glue.
*
* DMS writes any TIMESTAMP column values written to an S3 file in .csv format with microsecond precision.
*
* Setting ParquetTimestampInMillisecond has no effect on the string format of the timestamp column value that is
* inserted by setting the TimestampColumnName parameter.
*
* @return A value that specifies the precision of any TIMESTAMP column values that are written to an Amazon S3
*         object file in .parquet format.
*
*         DMS supports the ParquetTimestampInMillisecond parameter in versions 3.1.4 and later.
*
*         When ParquetTimestampInMillisecond is set to true or y, DMS writes all TIMESTAMP columns in a .parquet
*         formatted file with millisecond precision. Otherwise, DMS writes them with microsecond precision.
*
*         Currently, Amazon Athena and Glue can handle only millisecond precision for TIMESTAMP values. Set this
*         parameter to true for S3 endpoint object files that are .parquet formatted only if you plan to query or
*         process the data with Athena or Glue.
*
*         DMS writes any TIMESTAMP column values written to an S3 file in .csv format with microsecond precision.
*
*         Setting ParquetTimestampInMillisecond has no effect on the string format of the timestamp column value
*         that is inserted by setting the TimestampColumnName parameter.
*/
public Boolean getParquetTimestampInMillisecond() {
return this.parquetTimestampInMillisecond;
}
/**
*
* A value that specifies the precision of any TIMESTAMP column values that are written to an Amazon S3 object file
* in .parquet format.
*
* DMS supports the ParquetTimestampInMillisecond parameter in versions 3.1.4 and later.
*
* When ParquetTimestampInMillisecond is set to true or y, DMS writes all TIMESTAMP columns in a .parquet formatted
* file with millisecond precision. Otherwise, DMS writes them with microsecond precision.
*
* Currently, Amazon Athena and Glue can handle only millisecond precision for TIMESTAMP values. Set this parameter
* to true for S3 endpoint object files that are .parquet formatted only if you plan to query or process the data
* with Athena or Glue.
*
* DMS writes any TIMESTAMP column values written to an S3 file in .csv format with microsecond precision.
*
* Setting ParquetTimestampInMillisecond has no effect on the string format of the timestamp column value that is
* inserted by setting the TimestampColumnName parameter.
*
* @param parquetTimestampInMillisecond
*        A value that specifies the precision of any TIMESTAMP column values that are written to an Amazon S3
*        object file in .parquet format.
*
*        DMS supports the ParquetTimestampInMillisecond parameter in versions 3.1.4 and later.
*
*        When ParquetTimestampInMillisecond is set to true or y, DMS writes all TIMESTAMP columns in a .parquet
*        formatted file with millisecond precision. Otherwise, DMS writes them with microsecond precision.
*
*        Currently, Amazon Athena and Glue can handle only millisecond precision for TIMESTAMP values. Set this
*        parameter to true for S3 endpoint object files that are .parquet formatted only if you plan to query or
*        process the data with Athena or Glue.
*
*        DMS writes any TIMESTAMP column values written to an S3 file in .csv format with microsecond precision.
*
*        Setting ParquetTimestampInMillisecond has no effect on the string format of the timestamp column value
*        that is inserted by setting the TimestampColumnName parameter.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public S3Settings withParquetTimestampInMillisecond(Boolean parquetTimestampInMillisecond) {
setParquetTimestampInMillisecond(parquetTimestampInMillisecond);
return this;
}
/**
*
* A value that specifies the precision of any TIMESTAMP column values that are written to an Amazon S3 object file
* in .parquet format.
*
* DMS supports the ParquetTimestampInMillisecond parameter in versions 3.1.4 and later.
*
* When ParquetTimestampInMillisecond is set to true or y, DMS writes all TIMESTAMP columns in a .parquet formatted
* file with millisecond precision. Otherwise, DMS writes them with microsecond precision.
*
* Currently, Amazon Athena and Glue can handle only millisecond precision for TIMESTAMP values. Set this parameter
* to true for S3 endpoint object files that are .parquet formatted only if you plan to query or process the data
* with Athena or Glue.
*
* DMS writes any TIMESTAMP column values written to an S3 file in .csv format with microsecond precision.
*
* Setting ParquetTimestampInMillisecond has no effect on the string format of the timestamp column value that is
* inserted by setting the TimestampColumnName parameter.
*
* @return A value that specifies the precision of any TIMESTAMP column values that are written to an Amazon S3
*         object file in .parquet format.
*
*         DMS supports the ParquetTimestampInMillisecond parameter in versions 3.1.4 and later.
*
*         When ParquetTimestampInMillisecond is set to true or y, DMS writes all TIMESTAMP columns in a .parquet
*         formatted file with millisecond precision. Otherwise, DMS writes them with microsecond precision.
*
*         Currently, Amazon Athena and Glue can handle only millisecond precision for TIMESTAMP values. Set this
*         parameter to true for S3 endpoint object files that are .parquet formatted only if you plan to query or
*         process the data with Athena or Glue.
*
*         DMS writes any TIMESTAMP column values written to an S3 file in .csv format with microsecond precision.
*
*         Setting ParquetTimestampInMillisecond has no effect on the string format of the timestamp column value
*         that is inserted by setting the TimestampColumnName parameter.
*/
public Boolean isParquetTimestampInMillisecond() {
return this.parquetTimestampInMillisecond;
}
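/**
* Illustrative usage sketch only, not part of the generated SDK source: rounds Parquet TIMESTAMP values to
* millisecond precision so Athena and Glue, which currently read only millisecond precision, can query the output.
*/
private static S3Settings parquetTimestampInMillisecondSketch() {
    return new S3Settings().withParquetTimestampInMillisecond(Boolean.TRUE);
}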
/**
*
* A value that enables a change data capture (CDC) load to write INSERT and UPDATE operations to .csv or .parquet
* (columnar storage) output files. The default setting is false, but when CdcInsertsAndUpdates is set to true or
* y, only INSERTs and UPDATEs from the source database are migrated to the .csv or .parquet file.
*
* DMS supports the use of the .parquet files in versions 3.4.7 and later.
*
* How these INSERTs and UPDATEs are recorded depends on the value of the IncludeOpForFullLoad parameter. If
* IncludeOpForFullLoad is set to true, the first field of every CDC record is set to either I or U to indicate
* INSERT and UPDATE operations at the source. But if IncludeOpForFullLoad is set to false, CDC records are written
* without an indication of INSERT or UPDATE operations at the source. For more information about how these
* settings work together, see Indicating Source DB Operations in Migrated S3 Data in the Database Migration
* Service User Guide.
*
* DMS supports the use of the CdcInsertsAndUpdates parameter in versions 3.3.1 and later.
*
* CdcInsertsOnly and CdcInsertsAndUpdates can't both be set to true for the same endpoint. Set either
* CdcInsertsOnly or CdcInsertsAndUpdates to true for the same endpoint, but not both.
*
* @param cdcInsertsAndUpdates
*        A value that enables a change data capture (CDC) load to write INSERT and UPDATE operations to .csv or
*        .parquet (columnar storage) output files. The default setting is false, but when CdcInsertsAndUpdates is
*        set to true or y, only INSERTs and UPDATEs from the source database are migrated to the .csv or .parquet
*        file.
*
*        DMS supports the use of the .parquet files in versions 3.4.7 and later.
*
*        How these INSERTs and UPDATEs are recorded depends on the value of the IncludeOpForFullLoad parameter. If
*        IncludeOpForFullLoad is set to true, the first field of every CDC record is set to either I or U to
*        indicate INSERT and UPDATE operations at the source. But if IncludeOpForFullLoad is set to false, CDC
*        records are written without an indication of INSERT or UPDATE operations at the source. For more
*        information about how these settings work together, see Indicating Source DB Operations in Migrated S3
*        Data in the Database Migration Service User Guide.
*
*        DMS supports the use of the CdcInsertsAndUpdates parameter in versions 3.3.1 and later.
*
*        CdcInsertsOnly and CdcInsertsAndUpdates can't both be set to true for the same endpoint. Set either
*        CdcInsertsOnly or CdcInsertsAndUpdates to true for the same endpoint, but not both.
*/
public void setCdcInsertsAndUpdates(Boolean cdcInsertsAndUpdates) {
this.cdcInsertsAndUpdates = cdcInsertsAndUpdates;
}
/**
*
* A value that enables a change data capture (CDC) load to write INSERT and UPDATE operations to .csv or .parquet
* (columnar storage) output files. The default setting is false, but when CdcInsertsAndUpdates is set to true or
* y, only INSERTs and UPDATEs from the source database are migrated to the .csv or .parquet file.
*
* DMS supports the use of the .parquet files in versions 3.4.7 and later.
*
* How these INSERTs and UPDATEs are recorded depends on the value of the IncludeOpForFullLoad parameter. If
* IncludeOpForFullLoad is set to true, the first field of every CDC record is set to either I or U to indicate
* INSERT and UPDATE operations at the source. But if IncludeOpForFullLoad is set to false, CDC records are written
* without an indication of INSERT or UPDATE operations at the source. For more information about how these
* settings work together, see Indicating Source DB Operations in Migrated S3 Data in the Database Migration
* Service User Guide.
*
* DMS supports the use of the CdcInsertsAndUpdates parameter in versions 3.3.1 and later.
*
* CdcInsertsOnly and CdcInsertsAndUpdates can't both be set to true for the same endpoint. Set either
* CdcInsertsOnly or CdcInsertsAndUpdates to true for the same endpoint, but not both.
*
* @return A value that enables a change data capture (CDC) load to write INSERT and UPDATE operations to .csv or
*         .parquet (columnar storage) output files. The default setting is false, but when CdcInsertsAndUpdates is
*         set to true or y, only INSERTs and UPDATEs from the source database are migrated to the .csv or .parquet
*         file.
*
*         DMS supports the use of the .parquet files in versions 3.4.7 and later.
*
*         How these INSERTs and UPDATEs are recorded depends on the value of the IncludeOpForFullLoad parameter.
*         If IncludeOpForFullLoad is set to true, the first field of every CDC record is set to either I or U to
*         indicate INSERT and UPDATE operations at the source. But if IncludeOpForFullLoad is set to false, CDC
*         records are written without an indication of INSERT or UPDATE operations at the source. For more
*         information about how these settings work together, see Indicating Source DB Operations in Migrated S3
*         Data in the Database Migration Service User Guide.
*
*         DMS supports the use of the CdcInsertsAndUpdates parameter in versions 3.3.1 and later.
*
*         CdcInsertsOnly and CdcInsertsAndUpdates can't both be set to true for the same endpoint. Set either
*         CdcInsertsOnly or CdcInsertsAndUpdates to true for the same endpoint, but not both.
*/
public Boolean getCdcInsertsAndUpdates() {
return this.cdcInsertsAndUpdates;
}
/**
*
* A value that enables a change data capture (CDC) load to write INSERT and UPDATE operations to .csv or .parquet
* (columnar storage) output files. The default setting is false, but when CdcInsertsAndUpdates is set to true or
* y, only INSERTs and UPDATEs from the source database are migrated to the .csv or .parquet file.
*
* DMS supports the use of the .parquet files in versions 3.4.7 and later.
*
* How these INSERTs and UPDATEs are recorded depends on the value of the IncludeOpForFullLoad parameter. If
* IncludeOpForFullLoad is set to true, the first field of every CDC record is set to either I or U to indicate
* INSERT and UPDATE operations at the source. But if IncludeOpForFullLoad is set to false, CDC records are written
* without an indication of INSERT or UPDATE operations at the source. For more information about how these
* settings work together, see Indicating Source DB Operations in Migrated S3 Data in the Database Migration
* Service User Guide.
*
* DMS supports the use of the CdcInsertsAndUpdates parameter in versions 3.3.1 and later.
*
* CdcInsertsOnly and CdcInsertsAndUpdates can't both be set to true for the same endpoint. Set either
* CdcInsertsOnly or CdcInsertsAndUpdates to true for the same endpoint, but not both.
*
* @param cdcInsertsAndUpdates
*        A value that enables a change data capture (CDC) load to write INSERT and UPDATE operations to .csv or
*        .parquet (columnar storage) output files. The default setting is false, but when CdcInsertsAndUpdates is
*        set to true or y, only INSERTs and UPDATEs from the source database are migrated to the .csv or .parquet
*        file.
*
*        DMS supports the use of the .parquet files in versions 3.4.7 and later.
*
*        How these INSERTs and UPDATEs are recorded depends on the value of the IncludeOpForFullLoad parameter. If
*        IncludeOpForFullLoad is set to true, the first field of every CDC record is set to either I or U to
*        indicate INSERT and UPDATE operations at the source. But if IncludeOpForFullLoad is set to false, CDC
*        records are written without an indication of INSERT or UPDATE operations at the source. For more
*        information about how these settings work together, see Indicating Source DB Operations in Migrated S3
*        Data in the Database Migration Service User Guide.
*
*        DMS supports the use of the CdcInsertsAndUpdates parameter in versions 3.3.1 and later.
*
*        CdcInsertsOnly and CdcInsertsAndUpdates can't both be set to true for the same endpoint. Set either
*        CdcInsertsOnly or CdcInsertsAndUpdates to true for the same endpoint, but not both.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public S3Settings withCdcInsertsAndUpdates(Boolean cdcInsertsAndUpdates) {
setCdcInsertsAndUpdates(cdcInsertsAndUpdates);
return this;
}
/**
*
* A value that enables a change data capture (CDC) load to write INSERT and UPDATE operations to .csv or .parquet
* (columnar storage) output files. The default setting is false, but when CdcInsertsAndUpdates is set to true or
* y, only INSERTs and UPDATEs from the source database are migrated to the .csv or .parquet file.
*
* DMS supports the use of the .parquet files in versions 3.4.7 and later.
*
* How these INSERTs and UPDATEs are recorded depends on the value of the IncludeOpForFullLoad parameter. If
* IncludeOpForFullLoad is set to true, the first field of every CDC record is set to either I or U to indicate
* INSERT and UPDATE operations at the source. But if IncludeOpForFullLoad is set to false, CDC records are written
* without an indication of INSERT or UPDATE operations at the source. For more information about how these
* settings work together, see Indicating Source DB Operations in Migrated S3 Data in the Database Migration
* Service User Guide.
*
* DMS supports the use of the CdcInsertsAndUpdates parameter in versions 3.3.1 and later.
*
* CdcInsertsOnly and CdcInsertsAndUpdates can't both be set to true for the same endpoint. Set either
* CdcInsertsOnly or CdcInsertsAndUpdates to true for the same endpoint, but not both.
*
* @return A value that enables a change data capture (CDC) load to write INSERT and UPDATE operations to .csv or
*         .parquet (columnar storage) output files. The default setting is false, but when CdcInsertsAndUpdates is
*         set to true or y, only INSERTs and UPDATEs from the source database are migrated to the .csv or .parquet
*         file.
*
*         DMS supports the use of the .parquet files in versions 3.4.7 and later.
*
*         How these INSERTs and UPDATEs are recorded depends on the value of the IncludeOpForFullLoad parameter.
*         If IncludeOpForFullLoad is set to true, the first field of every CDC record is set to either I or U to
*         indicate INSERT and UPDATE operations at the source. But if IncludeOpForFullLoad is set to false, CDC
*         records are written without an indication of INSERT or UPDATE operations at the source. For more
*         information about how these settings work together, see Indicating Source DB Operations in Migrated S3
*         Data in the Database Migration Service User Guide.
*
*         DMS supports the use of the CdcInsertsAndUpdates parameter in versions 3.3.1 and later.
*
*         CdcInsertsOnly and CdcInsertsAndUpdates can't both be set to true for the same endpoint. Set either
*         CdcInsertsOnly or CdcInsertsAndUpdates to true for the same endpoint, but not both.
*/
public Boolean isCdcInsertsAndUpdates() {
return this.cdcInsertsAndUpdates;
}
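/*
 * Illustrative usage sketch (not part of the generated model): combining CdcInsertsAndUpdates with
 * IncludeOpForFullLoad so that each CDC record is prefixed with I or U, as described in the Javadoc above.
 * withIncludeOpForFullLoad is assumed to be the fluent setter for the IncludeOpForFullLoad parameter mentioned there.
 *
 *   S3Settings cdcSettings = new S3Settings()
 *           .withCdcInsertsAndUpdates(true)   // migrate only INSERTs and UPDATEs from the source
 *           .withIncludeOpForFullLoad(true);  // first field of every CDC record becomes I or U
 */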
/**
 * When set to true, this parameter partitions S3 bucket folders based on transaction commit dates. The default value
 * is false. For more information about date-based folder partitioning, see Using date-based folder partitioning.
 *
 * @param datePartitionEnabled
 *        When set to true, this parameter partitions S3 bucket folders based on transaction commit dates. The default
 *        value is false. For more information about date-based folder partitioning, see Using date-based folder
 *        partitioning.
 */
public void setDatePartitionEnabled(Boolean datePartitionEnabled) {
this.datePartitionEnabled = datePartitionEnabled;
}
/**
 * When set to true, this parameter partitions S3 bucket folders based on transaction commit dates. The default value
 * is false. For more information about date-based folder partitioning, see Using date-based folder partitioning.
 *
 * @return When set to true, this parameter partitions S3 bucket folders based on transaction commit dates. The
 *        default value is false. For more information about date-based folder partitioning, see Using date-based
 *        folder partitioning.
 */
public Boolean getDatePartitionEnabled() {
return this.datePartitionEnabled;
}
/**
 * When set to true, this parameter partitions S3 bucket folders based on transaction commit dates. The default value
 * is false. For more information about date-based folder partitioning, see Using date-based folder partitioning.
 *
 * @param datePartitionEnabled
 *        When set to true, this parameter partitions S3 bucket folders based on transaction commit dates. The default
 *        value is false. For more information about date-based folder partitioning, see Using date-based folder
 *        partitioning.
 * @return Returns a reference to this object so that method calls can be chained together.
 */
public S3Settings withDatePartitionEnabled(Boolean datePartitionEnabled) {
setDatePartitionEnabled(datePartitionEnabled);
return this;
}
/**
 * When set to true, this parameter partitions S3 bucket folders based on transaction commit dates. The default value
 * is false. For more information about date-based folder partitioning, see Using date-based folder partitioning.
 *
 * @return When set to true, this parameter partitions S3 bucket folders based on transaction commit dates. The
 *        default value is false. For more information about date-based folder partitioning, see Using date-based
 *        folder partitioning.
 */
public Boolean isDatePartitionEnabled() {
return this.datePartitionEnabled;
}
/**
 * Identifies the sequence of the date format to use during folder partitioning. The default value is YYYYMMDD. Use
 * this parameter when DatePartitionedEnabled is set to true.
 *
 * @param datePartitionSequence
 *        Identifies the sequence of the date format to use during folder partitioning. The default value is YYYYMMDD.
 *        Use this parameter when DatePartitionedEnabled is set to true.
 * @see DatePartitionSequenceValue
 */
public void setDatePartitionSequence(String datePartitionSequence) {
this.datePartitionSequence = datePartitionSequence;
}
/**
 * Identifies the sequence of the date format to use during folder partitioning. The default value is YYYYMMDD. Use
 * this parameter when DatePartitionedEnabled is set to true.
 *
 * @return Identifies the sequence of the date format to use during folder partitioning. The default value is
 *        YYYYMMDD. Use this parameter when DatePartitionedEnabled is set to true.
 * @see DatePartitionSequenceValue
 */
public String getDatePartitionSequence() {
return this.datePartitionSequence;
}
/**
 * Identifies the sequence of the date format to use during folder partitioning. The default value is YYYYMMDD. Use
 * this parameter when DatePartitionedEnabled is set to true.
 *
 * @param datePartitionSequence
 *        Identifies the sequence of the date format to use during folder partitioning. The default value is YYYYMMDD.
 *        Use this parameter when DatePartitionedEnabled is set to true.
 * @return Returns a reference to this object so that method calls can be chained together.
 * @see DatePartitionSequenceValue
 */
public S3Settings withDatePartitionSequence(String datePartitionSequence) {
setDatePartitionSequence(datePartitionSequence);
return this;
}
/**
 * Identifies the sequence of the date format to use during folder partitioning. The default value is YYYYMMDD. Use
 * this parameter when DatePartitionedEnabled is set to true.
 *
 * @param datePartitionSequence
 *        Identifies the sequence of the date format to use during folder partitioning. The default value is YYYYMMDD.
 *        Use this parameter when DatePartitionedEnabled is set to true.
 * @see DatePartitionSequenceValue
 */
public void setDatePartitionSequence(DatePartitionSequenceValue datePartitionSequence) {
withDatePartitionSequence(datePartitionSequence);
}
/**
 * Identifies the sequence of the date format to use during folder partitioning. The default value is YYYYMMDD. Use
 * this parameter when DatePartitionedEnabled is set to true.
 *
 * @param datePartitionSequence
 *        Identifies the sequence of the date format to use during folder partitioning. The default value is YYYYMMDD.
 *        Use this parameter when DatePartitionedEnabled is set to true.
 * @return Returns a reference to this object so that method calls can be chained together.
 * @see DatePartitionSequenceValue
 */
public S3Settings withDatePartitionSequence(DatePartitionSequenceValue datePartitionSequence) {
this.datePartitionSequence = datePartitionSequence.toString();
return this;
}
/**
 * Specifies a date separating delimiter to use during folder partitioning. The default value is SLASH. Use this
 * parameter when DatePartitionedEnabled is set to true.
 *
 * @param datePartitionDelimiter
 *        Specifies a date separating delimiter to use during folder partitioning. The default value is SLASH. Use
 *        this parameter when DatePartitionedEnabled is set to true.
 * @see DatePartitionDelimiterValue
 */
public void setDatePartitionDelimiter(String datePartitionDelimiter) {
this.datePartitionDelimiter = datePartitionDelimiter;
}
/**
 * Specifies a date separating delimiter to use during folder partitioning. The default value is SLASH. Use this
 * parameter when DatePartitionedEnabled is set to true.
 *
 * @return Specifies a date separating delimiter to use during folder partitioning. The default value is SLASH. Use
 *        this parameter when DatePartitionedEnabled is set to true.
 * @see DatePartitionDelimiterValue
 */
public String getDatePartitionDelimiter() {
return this.datePartitionDelimiter;
}
/**
 * Specifies a date separating delimiter to use during folder partitioning. The default value is SLASH. Use this
 * parameter when DatePartitionedEnabled is set to true.
 *
 * @param datePartitionDelimiter
 *        Specifies a date separating delimiter to use during folder partitioning. The default value is SLASH. Use
 *        this parameter when DatePartitionedEnabled is set to true.
 * @return Returns a reference to this object so that method calls can be chained together.
 * @see DatePartitionDelimiterValue
 */
public S3Settings withDatePartitionDelimiter(String datePartitionDelimiter) {
setDatePartitionDelimiter(datePartitionDelimiter);
return this;
}
/**
 * Specifies a date separating delimiter to use during folder partitioning. The default value is SLASH. Use this
 * parameter when DatePartitionedEnabled is set to true.
 *
 * @param datePartitionDelimiter
 *        Specifies a date separating delimiter to use during folder partitioning. The default value is SLASH. Use
 *        this parameter when DatePartitionedEnabled is set to true.
 * @see DatePartitionDelimiterValue
 */
public void setDatePartitionDelimiter(DatePartitionDelimiterValue datePartitionDelimiter) {
withDatePartitionDelimiter(datePartitionDelimiter);
}
/**
 * Specifies a date separating delimiter to use during folder partitioning. The default value is SLASH. Use this
 * parameter when DatePartitionedEnabled is set to true.
 *
 * @param datePartitionDelimiter
 *        Specifies a date separating delimiter to use during folder partitioning. The default value is SLASH. Use
 *        this parameter when DatePartitionedEnabled is set to true.
 * @return Returns a reference to this object so that method calls can be chained together.
 * @see DatePartitionDelimiterValue
 */
public S3Settings withDatePartitionDelimiter(DatePartitionDelimiterValue datePartitionDelimiter) {
this.datePartitionDelimiter = datePartitionDelimiter.toString();
return this;
}
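/*
 * Illustrative usage sketch (not part of the generated model): enabling date-based folder partitioning with the
 * documented defaults made explicit. The YYYYMMDD and SLASH enum constants are assumed from the default values named
 * in the documentation above.
 *
 *   S3Settings partitionedTarget = new S3Settings()
 *           .withDatePartitionEnabled(true)                                  // partition folders by commit date
 *           .withDatePartitionSequence(DatePartitionSequenceValue.YYYYMMDD)  // date format sequence (the default)
 *           .withDatePartitionDelimiter(DatePartitionDelimiterValue.SLASH);  // e.g. .../2024/06/15/... (the default)
 */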
/**
 * This setting applies if the S3 output files during a change data capture (CDC) load are written in .csv format. If
 * set to true for columns not included in the supplemental log, DMS uses the value specified by CsvNoSupValue. If not
 * set or set to false, DMS uses the null value for these columns.
 *
 * This setting is supported in DMS versions 3.4.1 and later.
 *
 * @param useCsvNoSupValue
 *        This setting applies if the S3 output files during a change data capture (CDC) load are written in .csv
 *        format. If set to true for columns not included in the supplemental log, DMS uses the value specified by
 *        CsvNoSupValue. If not set or set to false, DMS uses the null value for these columns.
 *
 *        This setting is supported in DMS versions 3.4.1 and later.
 */
public void setUseCsvNoSupValue(Boolean useCsvNoSupValue) {
this.useCsvNoSupValue = useCsvNoSupValue;
}
/**
 * This setting applies if the S3 output files during a change data capture (CDC) load are written in .csv format. If
 * set to true for columns not included in the supplemental log, DMS uses the value specified by CsvNoSupValue. If not
 * set or set to false, DMS uses the null value for these columns.
 *
 * This setting is supported in DMS versions 3.4.1 and later.
 *
 * @return This setting applies if the S3 output files during a change data capture (CDC) load are written in .csv
 *        format. If set to true for columns not included in the supplemental log, DMS uses the value specified by
 *        CsvNoSupValue. If not set or set to false, DMS uses the null value for these columns.
 *
 *        This setting is supported in DMS versions 3.4.1 and later.
 */
public Boolean getUseCsvNoSupValue() {
return this.useCsvNoSupValue;
}
/**
 * This setting applies if the S3 output files during a change data capture (CDC) load are written in .csv format. If
 * set to true for columns not included in the supplemental log, DMS uses the value specified by CsvNoSupValue. If not
 * set or set to false, DMS uses the null value for these columns.
 *
 * This setting is supported in DMS versions 3.4.1 and later.
 *
 * @param useCsvNoSupValue
 *        This setting applies if the S3 output files during a change data capture (CDC) load are written in .csv
 *        format. If set to true for columns not included in the supplemental log, DMS uses the value specified by
 *        CsvNoSupValue. If not set or set to false, DMS uses the null value for these columns.
 *
 *        This setting is supported in DMS versions 3.4.1 and later.
 * @return Returns a reference to this object so that method calls can be chained together.
 */
public S3Settings withUseCsvNoSupValue(Boolean useCsvNoSupValue) {
setUseCsvNoSupValue(useCsvNoSupValue);
return this;
}
/**
 * This setting applies if the S3 output files during a change data capture (CDC) load are written in .csv format. If
 * set to true for columns not included in the supplemental log, DMS uses the value specified by CsvNoSupValue. If not
 * set or set to false, DMS uses the null value for these columns.
 *
 * This setting is supported in DMS versions 3.4.1 and later.
 *
 * @return This setting applies if the S3 output files during a change data capture (CDC) load are written in .csv
 *        format. If set to true for columns not included in the supplemental log, DMS uses the value specified by
 *        CsvNoSupValue. If not set or set to false, DMS uses the null value for these columns.
 *
 *        This setting is supported in DMS versions 3.4.1 and later.
 */
public Boolean isUseCsvNoSupValue() {
return this.useCsvNoSupValue;
}
/**
 * This setting only applies if your Amazon S3 output files during a change data capture (CDC) load are written in
 * .csv format. If UseCsvNoSupValue is set to true, specify a string value that you want DMS to use for all columns
 * not included in the supplemental log. If you do not specify a string value, DMS uses the null value for these
 * columns regardless of the UseCsvNoSupValue setting.
 *
 * This setting is supported in DMS versions 3.4.1 and later.
 *
 * @param csvNoSupValue
 *        This setting only applies if your Amazon S3 output files during a change data capture (CDC) load are written
 *        in .csv format. If UseCsvNoSupValue is set to true, specify a string value that you want DMS to use for all
 *        columns not included in the supplemental log. If you do not specify a string value, DMS uses the null value
 *        for these columns regardless of the UseCsvNoSupValue setting.
 *
 *        This setting is supported in DMS versions 3.4.1 and later.
 */
public void setCsvNoSupValue(String csvNoSupValue) {
this.csvNoSupValue = csvNoSupValue;
}
/**
 * This setting only applies if your Amazon S3 output files during a change data capture (CDC) load are written in
 * .csv format. If UseCsvNoSupValue is set to true, specify a string value that you want DMS to use for all columns
 * not included in the supplemental log. If you do not specify a string value, DMS uses the null value for these
 * columns regardless of the UseCsvNoSupValue setting.
 *
 * This setting is supported in DMS versions 3.4.1 and later.
 *
 * @return This setting only applies if your Amazon S3 output files during a change data capture (CDC) load are
 *        written in .csv format. If UseCsvNoSupValue is set to true, specify a string value that you want DMS to use
 *        for all columns not included in the supplemental log. If you do not specify a string value, DMS uses the
 *        null value for these columns regardless of the UseCsvNoSupValue setting.
 *
 *        This setting is supported in DMS versions 3.4.1 and later.
 */
public String getCsvNoSupValue() {
return this.csvNoSupValue;
}
/**
 * This setting only applies if your Amazon S3 output files during a change data capture (CDC) load are written in
 * .csv format. If UseCsvNoSupValue is set to true, specify a string value that you want DMS to use for all columns
 * not included in the supplemental log. If you do not specify a string value, DMS uses the null value for these
 * columns regardless of the UseCsvNoSupValue setting.
 *
 * This setting is supported in DMS versions 3.4.1 and later.
 *
 * @param csvNoSupValue
 *        This setting only applies if your Amazon S3 output files during a change data capture (CDC) load are written
 *        in .csv format. If UseCsvNoSupValue is set to true, specify a string value that you want DMS to use for all
 *        columns not included in the supplemental log. If you do not specify a string value, DMS uses the null value
 *        for these columns regardless of the UseCsvNoSupValue setting.
 *
 *        This setting is supported in DMS versions 3.4.1 and later.
 * @return Returns a reference to this object so that method calls can be chained together.
 */
public S3Settings withCsvNoSupValue(String csvNoSupValue) {
setCsvNoSupValue(csvNoSupValue);
return this;
}
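/*
 * Illustrative usage sketch (not part of the generated model): substituting a marker string for columns that are
 * missing from the supplemental log in .csv CDC output (DMS 3.4.1 and later, per the documentation above). The marker
 * string itself is a hypothetical value chosen for illustration.
 *
 *   S3Settings csvCdcSettings = new S3Settings()
 *           .withUseCsvNoSupValue(true)        // use CsvNoSupValue instead of null for these columns
 *           .withCsvNoSupValue("NO_SUP_LOG");  // hypothetical marker string
 */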
/**
 * If set to true, DMS saves the transaction order for a change data capture (CDC) load on the Amazon S3 target
 * specified by CdcPath. For more information, see Capturing data changes (CDC) including transaction order on the S3
 * target.
 *
 * This setting is supported in DMS versions 3.4.2 and later.
 *
 * @param preserveTransactions
 *        If set to true, DMS saves the transaction order for a change data capture (CDC) load on the Amazon S3 target
 *        specified by CdcPath. For more information, see Capturing data changes (CDC) including transaction order on
 *        the S3 target.
 *
 *        This setting is supported in DMS versions 3.4.2 and later.
 */
public void setPreserveTransactions(Boolean preserveTransactions) {
this.preserveTransactions = preserveTransactions;
}
/**
 * If set to true, DMS saves the transaction order for a change data capture (CDC) load on the Amazon S3 target
 * specified by CdcPath. For more information, see Capturing data changes (CDC) including transaction order on the S3
 * target.
 *
 * This setting is supported in DMS versions 3.4.2 and later.
 *
 * @return If set to true, DMS saves the transaction order for a change data capture (CDC) load on the Amazon S3
 *        target specified by CdcPath. For more information, see Capturing data changes (CDC) including transaction
 *        order on the S3 target.
 *
 *        This setting is supported in DMS versions 3.4.2 and later.
 */
public Boolean getPreserveTransactions() {
return this.preserveTransactions;
}
/**
 * If set to true, DMS saves the transaction order for a change data capture (CDC) load on the Amazon S3 target
 * specified by CdcPath. For more information, see Capturing data changes (CDC) including transaction order on the S3
 * target.
 *
 * This setting is supported in DMS versions 3.4.2 and later.
 *
 * @param preserveTransactions
 *        If set to true, DMS saves the transaction order for a change data capture (CDC) load on the Amazon S3 target
 *        specified by CdcPath. For more information, see Capturing data changes (CDC) including transaction order on
 *        the S3 target.
 *
 *        This setting is supported in DMS versions 3.4.2 and later.
 * @return Returns a reference to this object so that method calls can be chained together.
 */
public S3Settings withPreserveTransactions(Boolean preserveTransactions) {
setPreserveTransactions(preserveTransactions);
return this;
}
/**
 * If set to true, DMS saves the transaction order for a change data capture (CDC) load on the Amazon S3 target
 * specified by CdcPath. For more information, see Capturing data changes (CDC) including transaction order on the S3
 * target.
 *
 * This setting is supported in DMS versions 3.4.2 and later.
 *
 * @return If set to true, DMS saves the transaction order for a change data capture (CDC) load on the Amazon S3
 *        target specified by CdcPath. For more information, see Capturing data changes (CDC) including transaction
 *        order on the S3 target.
 *
 *        This setting is supported in DMS versions 3.4.2 and later.
 */
public Boolean isPreserveTransactions() {
return this.preserveTransactions;
}
/**
 * Specifies the folder path of CDC files. For an S3 source, this setting is required if a task captures change data;
 * otherwise, it's optional. If CdcPath is set, DMS reads CDC files from this path and replicates the data changes to
 * the target endpoint. For an S3 target, if you set PreserveTransactions to true, DMS verifies that you have set this
 * parameter to a folder path on your S3 target where DMS can save the transaction order for the CDC load. DMS creates
 * this CDC folder path in either your S3 target working directory or the S3 target location specified by BucketFolder
 * and BucketName.
 *
 * For example, if you specify CdcPath as MyChangedData, and you specify BucketName as MyTargetBucket but do not
 * specify BucketFolder, DMS creates the following CDC folder path: MyTargetBucket/MyChangedData.
 *
 * If you specify the same CdcPath, and you specify BucketName as MyTargetBucket and BucketFolder as MyTargetData, DMS
 * creates the following CDC folder path: MyTargetBucket/MyTargetData/MyChangedData.
 *
 * For more information on CDC including transaction order on an S3 target, see Capturing data changes (CDC) including
 * transaction order on the S3 target.
 *
 * This setting is supported in DMS versions 3.4.2 and later.
 *
 * @param cdcPath
 *        Specifies the folder path of CDC files. For an S3 source, this setting is required if a task captures change
 *        data; otherwise, it's optional. If CdcPath is set, DMS reads CDC files from this path and replicates the data
 *        changes to the target endpoint. For an S3 target, if you set PreserveTransactions to true, DMS verifies that
 *        you have set this parameter to a folder path on your S3 target where DMS can save the transaction order for
 *        the CDC load. DMS creates this CDC folder path in either your S3 target working directory or the S3 target
 *        location specified by BucketFolder and BucketName.
 *
 *        For example, if you specify CdcPath as MyChangedData, and you specify BucketName as MyTargetBucket but do not
 *        specify BucketFolder, DMS creates the following CDC folder path: MyTargetBucket/MyChangedData.
 *
 *        If you specify the same CdcPath, and you specify BucketName as MyTargetBucket and BucketFolder as
 *        MyTargetData, DMS creates the following CDC folder path: MyTargetBucket/MyTargetData/MyChangedData.
 *
 *        For more information on CDC including transaction order on an S3 target, see Capturing data changes (CDC)
 *        including transaction order on the S3 target.
 *
 *        This setting is supported in DMS versions 3.4.2 and later.
 */
public void setCdcPath(String cdcPath) {
this.cdcPath = cdcPath;
}
/**
 * Specifies the folder path of CDC files. For an S3 source, this setting is required if a task captures change data;
 * otherwise, it's optional. If CdcPath is set, DMS reads CDC files from this path and replicates the data changes to
 * the target endpoint. For an S3 target, if you set PreserveTransactions to true, DMS verifies that you have set this
 * parameter to a folder path on your S3 target where DMS can save the transaction order for the CDC load. DMS creates
 * this CDC folder path in either your S3 target working directory or the S3 target location specified by BucketFolder
 * and BucketName.
 *
 * For example, if you specify CdcPath as MyChangedData, and you specify BucketName as MyTargetBucket but do not
 * specify BucketFolder, DMS creates the following CDC folder path: MyTargetBucket/MyChangedData.
 *
 * If you specify the same CdcPath, and you specify BucketName as MyTargetBucket and BucketFolder as MyTargetData, DMS
 * creates the following CDC folder path: MyTargetBucket/MyTargetData/MyChangedData.
 *
 * For more information on CDC including transaction order on an S3 target, see Capturing data changes (CDC) including
 * transaction order on the S3 target.
 *
 * This setting is supported in DMS versions 3.4.2 and later.
 *
 * @return Specifies the folder path of CDC files. For an S3 source, this setting is required if a task captures
 *        change data; otherwise, it's optional. If CdcPath is set, DMS reads CDC files from this path and replicates
 *        the data changes to the target endpoint. For an S3 target, if you set PreserveTransactions to true, DMS
 *        verifies that you have set this parameter to a folder path on your S3 target where DMS can save the
 *        transaction order for the CDC load. DMS creates this CDC folder path in either your S3 target working
 *        directory or the S3 target location specified by BucketFolder and BucketName.
 *
 *        For example, if you specify CdcPath as MyChangedData, and you specify BucketName as MyTargetBucket but do not
 *        specify BucketFolder, DMS creates the following CDC folder path: MyTargetBucket/MyChangedData.
 *
 *        If you specify the same CdcPath, and you specify BucketName as MyTargetBucket and BucketFolder as
 *        MyTargetData, DMS creates the following CDC folder path: MyTargetBucket/MyTargetData/MyChangedData.
 *
 *        For more information on CDC including transaction order on an S3 target, see Capturing data changes (CDC)
 *        including transaction order on the S3 target.
 *
 *        This setting is supported in DMS versions 3.4.2 and later.
 */
public String getCdcPath() {
return this.cdcPath;
}
/**
 * Specifies the folder path of CDC files. For an S3 source, this setting is required if a task captures change data;
 * otherwise, it's optional. If CdcPath is set, DMS reads CDC files from this path and replicates the data changes to
 * the target endpoint. For an S3 target, if you set PreserveTransactions to true, DMS verifies that you have set this
 * parameter to a folder path on your S3 target where DMS can save the transaction order for the CDC load. DMS creates
 * this CDC folder path in either your S3 target working directory or the S3 target location specified by BucketFolder
 * and BucketName.
 *
 * For example, if you specify CdcPath as MyChangedData, and you specify BucketName as MyTargetBucket but do not
 * specify BucketFolder, DMS creates the following CDC folder path: MyTargetBucket/MyChangedData.
 *
 * If you specify the same CdcPath, and you specify BucketName as MyTargetBucket and BucketFolder as MyTargetData, DMS
 * creates the following CDC folder path: MyTargetBucket/MyTargetData/MyChangedData.
 *
 * For more information on CDC including transaction order on an S3 target, see Capturing data changes (CDC) including
 * transaction order on the S3 target.
 *
 * This setting is supported in DMS versions 3.4.2 and later.
 *
 * @param cdcPath
 *        Specifies the folder path of CDC files. For an S3 source, this setting is required if a task captures change
 *        data; otherwise, it's optional. If CdcPath is set, DMS reads CDC files from this path and replicates the data
 *        changes to the target endpoint. For an S3 target, if you set PreserveTransactions to true, DMS verifies that
 *        you have set this parameter to a folder path on your S3 target where DMS can save the transaction order for
 *        the CDC load. DMS creates this CDC folder path in either your S3 target working directory or the S3 target
 *        location specified by BucketFolder and BucketName.
 *
 *        For example, if you specify CdcPath as MyChangedData, and you specify BucketName as MyTargetBucket but do not
 *        specify BucketFolder, DMS creates the following CDC folder path: MyTargetBucket/MyChangedData.
 *
 *        If you specify the same CdcPath, and you specify BucketName as MyTargetBucket and BucketFolder as
 *        MyTargetData, DMS creates the following CDC folder path: MyTargetBucket/MyTargetData/MyChangedData.
 *
 *        For more information on CDC including transaction order on an S3 target, see Capturing data changes (CDC)
 *        including transaction order on the S3 target.
 *
 *        This setting is supported in DMS versions 3.4.2 and later.
 * @return Returns a reference to this object so that method calls can be chained together.
 */
public S3Settings withCdcPath(String cdcPath) {
setCdcPath(cdcPath);
return this;
}
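/*
 * Illustrative usage sketch (not part of the generated model): preserving CDC transaction order on an S3 target,
 * reusing the bucket and folder names from the documentation example above. withBucketName and withBucketFolder are
 * assumed to be the fluent setters for the bucket fields of this class.
 *
 *   S3Settings orderedCdcTarget = new S3Settings()
 *           .withBucketName("MyTargetBucket")
 *           .withBucketFolder("MyTargetData")
 *           .withPreserveTransactions(true)  // requires DMS 3.4.2 and later
 *           .withCdcPath("MyChangedData");   // resolves to MyTargetBucket/MyTargetData/MyChangedData
 */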
/**
 * When set to true, this parameter uses the task start time as the timestamp column value instead of the time data is
 * written to the target. For full load, when useTaskStartTimeForFullLoadTimestamp is set to true, each row of the
 * timestamp column contains the task start time. For CDC loads, each row of the timestamp column contains the
 * transaction commit time.
 *
 * When useTaskStartTimeForFullLoadTimestamp is set to false, the full load timestamp in the timestamp column
 * increments with the time data arrives at the target.
 *
 * @param useTaskStartTimeForFullLoadTimestamp
 *        When set to true, this parameter uses the task start time as the timestamp column value instead of the time
 *        data is written to the target. For full load, when useTaskStartTimeForFullLoadTimestamp is set to true, each
 *        row of the timestamp column contains the task start time. For CDC loads, each row of the timestamp column
 *        contains the transaction commit time.
 *
 *        When useTaskStartTimeForFullLoadTimestamp is set to false, the full load timestamp in the timestamp column
 *        increments with the time data arrives at the target.
 */
public void setUseTaskStartTimeForFullLoadTimestamp(Boolean useTaskStartTimeForFullLoadTimestamp) {
this.useTaskStartTimeForFullLoadTimestamp = useTaskStartTimeForFullLoadTimestamp;
}
/**
 * When set to true, this parameter uses the task start time as the timestamp column value instead of the time data is
 * written to the target. For full load, when useTaskStartTimeForFullLoadTimestamp is set to true, each row of the
 * timestamp column contains the task start time. For CDC loads, each row of the timestamp column contains the
 * transaction commit time.
 *
 * When useTaskStartTimeForFullLoadTimestamp is set to false, the full load timestamp in the timestamp column
 * increments with the time data arrives at the target.
 *
 * @return When set to true, this parameter uses the task start time as the timestamp column value instead of the time
 *        data is written to the target. For full load, when useTaskStartTimeForFullLoadTimestamp is set to true, each
 *        row of the timestamp column contains the task start time. For CDC loads, each row of the timestamp column
 *        contains the transaction commit time.
 *
 *        When useTaskStartTimeForFullLoadTimestamp is set to false, the full load timestamp in the timestamp column
 *        increments with the time data arrives at the target.
 */
public Boolean getUseTaskStartTimeForFullLoadTimestamp() {
return this.useTaskStartTimeForFullLoadTimestamp;
}
/**
 * When set to true, this parameter uses the task start time as the timestamp column value instead of the time data is
 * written to the target. For full load, when useTaskStartTimeForFullLoadTimestamp is set to true, each row of the
 * timestamp column contains the task start time. For CDC loads, each row of the timestamp column contains the
 * transaction commit time.
 *
 * When useTaskStartTimeForFullLoadTimestamp is set to false, the full load timestamp in the timestamp column
 * increments with the time data arrives at the target.
 *
 * @param useTaskStartTimeForFullLoadTimestamp
 *        When set to true, this parameter uses the task start time as the timestamp column value instead of the time
 *        data is written to the target. For full load, when useTaskStartTimeForFullLoadTimestamp is set to true, each
 *        row of the timestamp column contains the task start time. For CDC loads, each row of the timestamp column
 *        contains the transaction commit time.
 *
 *        When useTaskStartTimeForFullLoadTimestamp is set to false, the full load timestamp in the timestamp column
 *        increments with the time data arrives at the target.
 * @return Returns a reference to this object so that method calls can be chained together.
 */
public S3Settings withUseTaskStartTimeForFullLoadTimestamp(Boolean useTaskStartTimeForFullLoadTimestamp) {
setUseTaskStartTimeForFullLoadTimestamp(useTaskStartTimeForFullLoadTimestamp);
return this;
}
/**
 * When set to true, this parameter uses the task start time as the timestamp column value instead of the time data is
 * written to the target. For full load, when useTaskStartTimeForFullLoadTimestamp is set to true, each row of the
 * timestamp column contains the task start time. For CDC loads, each row of the timestamp column contains the
 * transaction commit time.
 *
 * When useTaskStartTimeForFullLoadTimestamp is set to false, the full load timestamp in the timestamp column
 * increments with the time data arrives at the target.
 *
 * @return When set to true, this parameter uses the task start time as the timestamp column value instead of the time
 *        data is written to the target. For full load, when useTaskStartTimeForFullLoadTimestamp is set to true, each
 *        row of the timestamp column contains the task start time. For CDC loads, each row of the timestamp column
 *        contains the transaction commit time.
 *
 *        When useTaskStartTimeForFullLoadTimestamp is set to false, the full load timestamp in the timestamp column
 *        increments with the time data arrives at the target.
 */
public Boolean isUseTaskStartTimeForFullLoadTimestamp() {
return this.useTaskStartTimeForFullLoadTimestamp;
}
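/*
 * Illustrative usage sketch (not part of the generated model): stamping every full-load row with the task start time
 * rather than the time the row is written to the target.
 *
 *   S3Settings timestampSettings = new S3Settings()
 *           .withUseTaskStartTimeForFullLoadTimestamp(true);  // full-load rows share the task start time
 */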
/**
 * A value that enables DMS to specify a predefined (canned) access control list for objects created in an Amazon S3
 * bucket as .csv or .parquet files. For more information about Amazon S3 canned ACLs, see Canned ACL in the Amazon S3
 * Developer Guide.
 *
 * The default value is NONE. Valid values include NONE, PRIVATE, PUBLIC_READ, PUBLIC_READ_WRITE, AUTHENTICATED_READ,
 * AWS_EXEC_READ, BUCKET_OWNER_READ, and BUCKET_OWNER_FULL_CONTROL.
 *
 * @param cannedAclForObjects
 *        A value that enables DMS to specify a predefined (canned) access control list for objects created in an
 *        Amazon S3 bucket as .csv or .parquet files. For more information about Amazon S3 canned ACLs, see Canned ACL
 *        in the Amazon S3 Developer Guide.
 *
 *        The default value is NONE. Valid values include NONE, PRIVATE, PUBLIC_READ, PUBLIC_READ_WRITE,
 *        AUTHENTICATED_READ, AWS_EXEC_READ, BUCKET_OWNER_READ, and BUCKET_OWNER_FULL_CONTROL.
 * @see CannedAclForObjectsValue
 */
public void setCannedAclForObjects(String cannedAclForObjects) {
this.cannedAclForObjects = cannedAclForObjects;
}
/**
 * A value that enables DMS to specify a predefined (canned) access control list for objects created in an Amazon S3
 * bucket as .csv or .parquet files. For more information about Amazon S3 canned ACLs, see Canned ACL in the Amazon S3
 * Developer Guide.
 *
 * The default value is NONE. Valid values include NONE, PRIVATE, PUBLIC_READ, PUBLIC_READ_WRITE, AUTHENTICATED_READ,
 * AWS_EXEC_READ, BUCKET_OWNER_READ, and BUCKET_OWNER_FULL_CONTROL.
 *
 * @return A value that enables DMS to specify a predefined (canned) access control list for objects created in an
 *        Amazon S3 bucket as .csv or .parquet files. For more information about Amazon S3 canned ACLs, see Canned ACL
 *        in the Amazon S3 Developer Guide.
 *
 *        The default value is NONE. Valid values include NONE, PRIVATE, PUBLIC_READ, PUBLIC_READ_WRITE,
 *        AUTHENTICATED_READ, AWS_EXEC_READ, BUCKET_OWNER_READ, and BUCKET_OWNER_FULL_CONTROL.
 * @see CannedAclForObjectsValue
 */
public String getCannedAclForObjects() {
return this.cannedAclForObjects;
}
/**
 * A value that enables DMS to specify a predefined (canned) access control list for objects created in an Amazon S3
 * bucket as .csv or .parquet files. For more information about Amazon S3 canned ACLs, see Canned ACL in the Amazon S3
 * Developer Guide.
 *
 * The default value is NONE. Valid values include NONE, PRIVATE, PUBLIC_READ, PUBLIC_READ_WRITE, AUTHENTICATED_READ,
 * AWS_EXEC_READ, BUCKET_OWNER_READ, and BUCKET_OWNER_FULL_CONTROL.
 *
 * @param cannedAclForObjects
 *        A value that enables DMS to specify a predefined (canned) access control list for objects created in an
 *        Amazon S3 bucket as .csv or .parquet files. For more information about Amazon S3 canned ACLs, see Canned ACL
 *        in the Amazon S3 Developer Guide.
 *
 *        The default value is NONE. Valid values include NONE, PRIVATE, PUBLIC_READ, PUBLIC_READ_WRITE,
 *        AUTHENTICATED_READ, AWS_EXEC_READ, BUCKET_OWNER_READ, and BUCKET_OWNER_FULL_CONTROL.
 * @return Returns a reference to this object so that method calls can be chained together.
 * @see CannedAclForObjectsValue
 */
public S3Settings withCannedAclForObjects(String cannedAclForObjects) {
setCannedAclForObjects(cannedAclForObjects);
return this;
}
/**
 * A value that enables DMS to specify a predefined (canned) access control list for objects created in an Amazon S3
 * bucket as .csv or .parquet files. For more information about Amazon S3 canned ACLs, see Canned ACL in the Amazon S3
 * Developer Guide.
 *
 * The default value is NONE. Valid values include NONE, PRIVATE, PUBLIC_READ, PUBLIC_READ_WRITE, AUTHENTICATED_READ,
 * AWS_EXEC_READ, BUCKET_OWNER_READ, and BUCKET_OWNER_FULL_CONTROL.
 *
 * @param cannedAclForObjects
 *        A value that enables DMS to specify a predefined (canned) access control list for objects created in an
 *        Amazon S3 bucket as .csv or .parquet files. For more information about Amazon S3 canned ACLs, see Canned ACL
 *        in the Amazon S3 Developer Guide.
 *
 *        The default value is NONE. Valid values include NONE, PRIVATE, PUBLIC_READ, PUBLIC_READ_WRITE,
 *        AUTHENTICATED_READ, AWS_EXEC_READ, BUCKET_OWNER_READ, and BUCKET_OWNER_FULL_CONTROL.
 * @see CannedAclForObjectsValue
 */
public void setCannedAclForObjects(CannedAclForObjectsValue cannedAclForObjects) {
withCannedAclForObjects(cannedAclForObjects);
}
/**
 * A value that enables DMS to specify a predefined (canned) access control list for objects created in an Amazon S3
 * bucket as .csv or .parquet files. For more information about Amazon S3 canned ACLs, see Canned ACL in the Amazon S3
 * Developer Guide.
 *
 * The default value is NONE. Valid values include NONE, PRIVATE, PUBLIC_READ, PUBLIC_READ_WRITE, AUTHENTICATED_READ,
 * AWS_EXEC_READ, BUCKET_OWNER_READ, and BUCKET_OWNER_FULL_CONTROL.
 *
 * @param cannedAclForObjects
 *        A value that enables DMS to specify a predefined (canned) access control list for objects created in an
 *        Amazon S3 bucket as .csv or .parquet files. For more information about Amazon S3 canned ACLs, see Canned ACL
 *        in the Amazon S3 Developer Guide.
 *
 *        The default value is NONE. Valid values include NONE, PRIVATE, PUBLIC_READ, PUBLIC_READ_WRITE,
 *        AUTHENTICATED_READ, AWS_EXEC_READ, BUCKET_OWNER_READ, and BUCKET_OWNER_FULL_CONTROL.
 * @return Returns a reference to this object so that method calls can be chained together.
 * @see CannedAclForObjectsValue
 */
public S3Settings withCannedAclForObjects(CannedAclForObjectsValue cannedAclForObjects) {
this.cannedAclForObjects = cannedAclForObjects.toString();
return this;
}
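/*
 * Illustrative usage sketch (not part of the generated model): applying a canned ACL to the objects DMS writes. The
 * BUCKET_OWNER_FULL_CONTROL enum constant is assumed from the valid values listed in the documentation above; the
 * String overload of withCannedAclForObjects could be used instead.
 *
 *   S3Settings aclSettings = new S3Settings()
 *           .withCannedAclForObjects(CannedAclForObjectsValue.BUCKET_OWNER_FULL_CONTROL);
 */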
/**
 * An optional parameter that you can use, when set to true or y, to add column name information to the .csv output
 * file.
 *
 * The default value is false. Valid values are true, false, y, and n.
 *
 * @param addColumnName
 *        An optional parameter that you can use, when set to true or y, to add column name information to the .csv
 *        output file.
 *
 *        The default value is false. Valid values are true, false, y, and n.
 */
public void setAddColumnName(Boolean addColumnName) {
this.addColumnName = addColumnName;
}
/**
 * An optional parameter that you can use, when set to true or y, to add column name information to the .csv output
 * file.
 *
 * The default value is false. Valid values are true, false, y, and n.
 *
 * @return An optional parameter that you can use, when set to true or y, to add column name information to the .csv
 *        output file.
 *
 *        The default value is false. Valid values are true, false, y, and n.
 */
public Boolean getAddColumnName() {
return this.addColumnName;
}
/**
 * An optional parameter that you can use, when set to true or y, to add column name information to the .csv output
 * file.
 *
 * The default value is false. Valid values are true, false, y, and n.
 *
 * @param addColumnName
 *        An optional parameter that you can use, when set to true or y, to add column name information to the .csv
 *        output file.
 *
 *        The default value is false. Valid values are true, false, y, and n.
 * @return Returns a reference to this object so that method calls can be chained together.
 */
public S3Settings withAddColumnName(Boolean addColumnName) {
setAddColumnName(addColumnName);
return this;
}
/**
 * An optional parameter that you can use, when set to true or y, to add column name information to the .csv output
 * file.
 *
 * The default value is false. Valid values are true, false, y, and n.
 *
 * @return An optional parameter that you can use, when set to true or y, to add column name information to the .csv
 *        output file.
 *
 *        The default value is false. Valid values are true, false, y, and n.
 */
public Boolean isAddColumnName() {
return this.addColumnName;
}
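/*
 * Illustrative usage sketch (not part of the generated model): adding a column-name header row to .csv output.
 *
 *   S3Settings csvWithHeaders = new S3Settings()
 *           .withAddColumnName(true);  // default is false; .csv output gains column name information
 */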
/**
 * Maximum length of the interval, defined in seconds, after which to output a file to Amazon S3.
 *
 * When CdcMaxBatchInterval and CdcMinFileSize are both specified, the file write is triggered by whichever parameter
 * condition is met first within a DMS CloudFormation template.
 *
 * The default value is 60 seconds.
 *
 * @param cdcMaxBatchInterval
 *        Maximum length of the interval, defined in seconds, after which to output a file to Amazon S3.
 *
 *        When CdcMaxBatchInterval and CdcMinFileSize are both specified, the file write is triggered by whichever
 *        parameter condition is met first within a DMS CloudFormation template.
 *
 *        The default value is 60 seconds.
 */
public void setCdcMaxBatchInterval(Integer cdcMaxBatchInterval) {
this.cdcMaxBatchInterval = cdcMaxBatchInterval;
}
/**
 * Maximum length of the interval, defined in seconds, after which to output a file to Amazon S3.
 *
 * When CdcMaxBatchInterval and CdcMinFileSize are both specified, the file write is triggered by whichever parameter
 * condition is met first within a DMS CloudFormation template.
 *
 * The default value is 60 seconds.
 *
 * @return Maximum length of the interval, defined in seconds, after which to output a file to Amazon S3.
 *
 *        When CdcMaxBatchInterval and CdcMinFileSize are both specified, the file write is triggered by whichever
 *        parameter condition is met first within a DMS CloudFormation template.
 *
 *        The default value is 60 seconds.
 */
public Integer getCdcMaxBatchInterval() {
return this.cdcMaxBatchInterval;
}
/**
 * Maximum length of the interval, defined in seconds, after which to output a file to Amazon S3.
 *
 * When CdcMaxBatchInterval and CdcMinFileSize are both specified, the file write is triggered by whichever parameter
 * condition is met first within a DMS CloudFormation template.
 *
 * The default value is 60 seconds.
 *
 * @param cdcMaxBatchInterval
 *        Maximum length of the interval, defined in seconds, after which to output a file to Amazon S3.
 *
 *        When CdcMaxBatchInterval and CdcMinFileSize are both specified, the file write is triggered by whichever
 *        parameter condition is met first within a DMS CloudFormation template.
 *
 *        The default value is 60 seconds.
 * @return Returns a reference to this object so that method calls can be chained together.
 */
public S3Settings withCdcMaxBatchInterval(Integer cdcMaxBatchInterval) {
setCdcMaxBatchInterval(cdcMaxBatchInterval);
return this;
}
/**
 * Minimum file size, defined in kilobytes, to reach for a file output to Amazon S3.
 *
 * When CdcMinFileSize and CdcMaxBatchInterval are both specified, the file write is triggered by whichever parameter
 * condition is met first within a DMS CloudFormation template.
 *
 * The default value is 32 MB.
 *
 * @param cdcMinFileSize
 *        Minimum file size, defined in kilobytes, to reach for a file output to Amazon S3.
 *
 *        When CdcMinFileSize and CdcMaxBatchInterval are both specified, the file write is triggered by whichever
 *        parameter condition is met first within a DMS CloudFormation template.
 *
 *        The default value is 32 MB.
 */
public void setCdcMinFileSize(Integer cdcMinFileSize) {
this.cdcMinFileSize = cdcMinFileSize;
}
/**
 * Minimum file size, defined in kilobytes, to reach for a file output to Amazon S3.
 *
 * When CdcMinFileSize and CdcMaxBatchInterval are both specified, the file write is triggered by whichever parameter
 * condition is met first within a DMS CloudFormation template.
 *
 * The default value is 32 MB.
 *
 * @return Minimum file size, defined in kilobytes, to reach for a file output to Amazon S3.
 *
 *        When CdcMinFileSize and CdcMaxBatchInterval are both specified, the file write is triggered by whichever
 *        parameter condition is met first within a DMS CloudFormation template.
 *
 *        The default value is 32 MB.
 */
public Integer getCdcMinFileSize() {
return this.cdcMinFileSize;
}
/**
 * Minimum file size, defined in kilobytes, to reach for a file output to Amazon S3.
 *
 * When CdcMinFileSize and CdcMaxBatchInterval are both specified, the file write is triggered by whichever parameter
 * condition is met first within a DMS CloudFormation template.
 *
 * The default value is 32 MB.
 *
 * @param cdcMinFileSize
 *        Minimum file size, defined in kilobytes, to reach for a file output to Amazon S3.
 *
 *        When CdcMinFileSize and CdcMaxBatchInterval are both specified, the file write is triggered by whichever
 *        parameter condition is met first within a DMS CloudFormation template.
 *
 *        The default value is 32 MB.
 * @return Returns a reference to this object so that method calls can be chained together.
 */
public S3Settings withCdcMinFileSize(Integer cdcMinFileSize) {
setCdcMinFileSize(cdcMinFileSize);
return this;
}
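/*
 * Illustrative usage sketch (not part of the generated model): tuning CDC file rollover so that a file is written
 * when either 120 seconds elapse or 64,000 KB accumulate, whichever condition is met first. The specific numbers are
 * arbitrary example values.
 *
 *   S3Settings rolloverSettings = new S3Settings()
 *           .withCdcMaxBatchInterval(120)  // seconds; default is 60
 *           .withCdcMinFileSize(64000);    // kilobytes; default is 32 MB
 */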
/**
 * An optional parameter that specifies how DMS treats null values. While handling the null value, you can use this
 * parameter to pass a user-defined string as null when writing to the target. For example, when target columns are
 * nullable, you can use this option to differentiate between the empty string value and the null value. So, if you
 * set this parameter value to the empty string ("" or ''), DMS treats the empty string as the null value instead of
 * NULL.
 *
 * The default value is NULL. Valid values include any valid string.
 *
 * @param csvNullValue
 *        An optional parameter that specifies how DMS treats null values. While handling the null value, you can use
 *        this parameter to pass a user-defined string as null when writing to the target. For example, when target
 *        columns are nullable, you can use this option to differentiate between the empty string value and the null
 *        value. So, if you set this parameter value to the empty string ("" or ''), DMS treats the empty string as
 *        the null value instead of NULL.
 *
 *        The default value is NULL. Valid values include any valid string.
 */
public void setCsvNullValue(String csvNullValue) {
this.csvNullValue = csvNullValue;
}
/**
 * An optional parameter that specifies how DMS treats null values. While handling the null value, you can use this
 * parameter to pass a user-defined string as null when writing to the target. For example, when target columns are
 * nullable, you can use this option to differentiate between the empty string value and the null value. So, if you
 * set this parameter value to the empty string ("" or ''), DMS treats the empty string as the null value instead of
 * NULL.
 *
 * The default value is NULL. Valid values include any valid string.
 *
 * @return An optional parameter that specifies how DMS treats null values. While handling the null value, you can use
 *        this parameter to pass a user-defined string as null when writing to the target. For example, when target
 *        columns are nullable, you can use this option to differentiate between the empty string value and the null
 *        value. So, if you set this parameter value to the empty string ("" or ''), DMS treats the empty string as
 *        the null value instead of NULL.
 *
 *        The default value is NULL. Valid values include any valid string.
 */
public String getCsvNullValue() {
return this.csvNullValue;
}
/**
 * An optional parameter that specifies how DMS treats null values. While handling the null value, you can use this
 * parameter to pass a user-defined string as null when writing to the target. For example, when target columns are
 * nullable, you can use this option to differentiate between the empty string value and the null value. So, if you
 * set this parameter value to the empty string ("" or ''), DMS treats the empty string as the null value instead of
 * NULL.
 *
 * The default value is NULL. Valid values include any valid string.
 *
 * @param csvNullValue
 *        An optional parameter that specifies how DMS treats null values. While handling the null value, you can use
 *        this parameter to pass a user-defined string as null when writing to the target. For example, when target
 *        columns are nullable, you can use this option to differentiate between the empty string value and the null
 *        value. So, if you set this parameter value to the empty string ("" or ''), DMS treats the empty string as
 *        the null value instead of NULL.
 *
 *        The default value is NULL. Valid values include any valid string.
 * @return Returns a reference to this object so that method calls can be chained together.
 */
public S3Settings withCsvNullValue(String csvNullValue) {
setCsvNullValue(csvNullValue);
return this;
}
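/*
 * Illustrative usage sketch (not part of the generated model): distinguishing empty strings from nulls by writing the
 * empty string for null values, as described in the documentation above.
 *
 *   S3Settings nullHandling = new S3Settings()
 *           .withCsvNullValue("");  // the empty string is treated as the null value instead of NULL
 */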
/**
 * When this value is set to 1, DMS ignores the first row header in a .csv file. A value of 1 turns on the feature; a
 * value of 0 turns off the feature.
 *
 * The default is 0.
 *
 * @param ignoreHeaderRows
 *        When this value is set to 1, DMS ignores the first row header in a .csv file. A value of 1 turns on the
 *        feature; a value of 0 turns off the feature.
 *
 *        The default is 0.
 */
public void setIgnoreHeaderRows(Integer ignoreHeaderRows) {
this.ignoreHeaderRows = ignoreHeaderRows;
}
/**
 * When this value is set to 1, DMS ignores the first row header in a .csv file. A value of 1 turns on the feature; a
 * value of 0 turns off the feature.
 *
 * The default is 0.
 *
 * @return When this value is set to 1, DMS ignores the first row header in a .csv file. A value of 1 turns on the
 *        feature; a value of 0 turns off the feature.
 *
 *        The default is 0.
 */
public Integer getIgnoreHeaderRows() {
return this.ignoreHeaderRows;
}
/**
 * When this value is set to 1, DMS ignores the first row header in a .csv file. A value of 1 turns on the feature; a
 * value of 0 turns off the feature.
 *
 * The default is 0.
 *
 * @param ignoreHeaderRows
 *        When this value is set to 1, DMS ignores the first row header in a .csv file. A value of 1 turns on the
 *        feature; a value of 0 turns off the feature.
 *
 *        The default is 0.
 * @return Returns a reference to this object so that method calls can be chained together.
 */
public S3Settings withIgnoreHeaderRows(Integer ignoreHeaderRows) {
setIgnoreHeaderRows(ignoreHeaderRows);
return this;
}
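/*
 * Illustrative usage sketch (not part of the generated model): skipping the header row of source .csv files.
 *
 *   S3Settings headerAware = new S3Settings()
 *           .withIgnoreHeaderRows(1);  // 1 turns the feature on, 0 (the default) turns it off
 */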
/**
 * A value that specifies the maximum size (in KB) of any .csv file to be created while migrating to an S3 target
 * during full load.
 *
 * The default value is 1,048,576 KB (1 GB). Valid values include 1 to 1,048,576.
 *
 * @param maxFileSize
 *        A value that specifies the maximum size (in KB) of any .csv file to be created while migrating to an S3
 *        target during full load.
 *
 *        The default value is 1,048,576 KB (1 GB). Valid values include 1 to 1,048,576.
 */
public void setMaxFileSize(Integer maxFileSize) {
this.maxFileSize = maxFileSize;
}
/**
 * A value that specifies the maximum size (in KB) of any .csv file to be created while migrating to an S3 target
 * during full load.
 *
 * The default value is 1,048,576 KB (1 GB). Valid values include 1 to 1,048,576.
 *
 * @return A value that specifies the maximum size (in KB) of any .csv file to be created while migrating to an S3
 *        target during full load.
 *
 *        The default value is 1,048,576 KB (1 GB). Valid values include 1 to 1,048,576.
 */
public Integer getMaxFileSize() {
return this.maxFileSize;
}
/**
 * A value that specifies the maximum size (in KB) of any .csv file to be created while migrating to an S3 target
 * during full load.
 *
 * The default value is 1,048,576 KB (1 GB). Valid values include 1 to 1,048,576.
 *
 * @param maxFileSize
 *        A value that specifies the maximum size (in KB) of any .csv file to be created while migrating to an S3
 *        target during full load.
 *
 *        The default value is 1,048,576 KB (1 GB). Valid values include 1 to 1,048,576.
 * @return Returns a reference to this object so that method calls can be chained together.
 */
public S3Settings withMaxFileSize(Integer maxFileSize) {
setMaxFileSize(maxFileSize);
return this;
}
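/*
 * Illustrative usage sketch (not part of the generated model): capping full-load .csv files at roughly 100 MB. The
 * specific number is an arbitrary example value.
 *
 *   S3Settings sizeCapped = new S3Settings()
 *           .withMaxFileSize(102400);  // KB; valid values are 1 to 1,048,576 (the default, 1 GB)
 */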
/**
 * For an S3 source, when this value is set to true or y, each leading double quotation mark has to be followed by an
 * ending double quotation mark. This formatting complies with RFC 4180. When this value is set to false or n, string
 * literals are copied to the target as is. In this case, a delimiter (row or column) signals the end of the field.
 * Thus, you can't use a delimiter as part of the string, because it signals the end of the value.
 *
 * For an S3 target, an optional parameter used to set behavior to comply with RFC 4180 for data migrated to Amazon S3
 * using .csv file format only. When this value is set to true or y using Amazon S3 as a target, if the data has
 * quotation marks or newline characters in it, DMS encloses the entire column with an additional pair of double
 * quotation marks ("). Every quotation mark within the data is repeated twice.
 *
 * The default value is true. Valid values include true, false, y, and n.
 *
 * @param rfc4180
 *        For an S3 source, when this value is set to true or y, each leading double quotation mark has to be followed
 *        by an ending double quotation mark. This formatting complies with RFC 4180. When this value is set to false
 *        or n, string literals are copied to the target as is. In this case, a delimiter (row or column) signals the
 *        end of the field. Thus, you can't use a delimiter as part of the string, because it signals the end of the
 *        value.
 *
 *        For an S3 target, an optional parameter used to set behavior to comply with RFC 4180 for data migrated to
 *        Amazon S3 using .csv file format only. When this value is set to true or y using Amazon S3 as a target, if
 *        the data has quotation marks or newline characters in it, DMS encloses the entire column with an additional
 *        pair of double quotation marks ("). Every quotation mark within the data is repeated twice.
 *
 *        The default value is true. Valid values include true, false, y, and n.
 */
public void setRfc4180(Boolean rfc4180) {
this.rfc4180 = rfc4180;
}
/**
 * For an S3 source, when this value is set to true or y, each leading double quotation mark has to be followed by an
 * ending double quotation mark. This formatting complies with RFC 4180. When this value is set to false or n, string
 * literals are copied to the target as is. In this case, a delimiter (row or column) signals the end of the field.
 * Thus, you can't use a delimiter as part of the string, because it signals the end of the value.
 *
 * For an S3 target, an optional parameter used to set behavior to comply with RFC 4180 for data migrated to Amazon S3
 * using .csv file format only. When this value is set to true or y using Amazon S3 as a target, if the data has
 * quotation marks or newline characters in it, DMS encloses the entire column with an additional pair of double
 * quotation marks ("). Every quotation mark within the data is repeated twice.
 *
 * The default value is true. Valid values include true, false, y, and n.
 *
 * @return For an S3 source, when this value is set to true or y, each leading double quotation mark has to be
 *        followed by an ending double quotation mark. This formatting complies with RFC 4180. When this value is set
 *        to false or n, string literals are copied to the target as is. In this case, a delimiter (row or column)
 *        signals the end of the field. Thus, you can't use a delimiter as part of the string, because it signals the
 *        end of the value.
 *
 *        For an S3 target, an optional parameter used to set behavior to comply with RFC 4180 for data migrated to
 *        Amazon S3 using .csv file format only. When this value is set to true or y using Amazon S3 as a target, if
 *        the data has quotation marks or newline characters in it, DMS encloses the entire column with an additional
 *        pair of double quotation marks ("). Every quotation mark within the data is repeated twice.
 *
 *        The default value is true. Valid values include true, false, y, and n.
 */
public Boolean getRfc4180() {
return this.rfc4180;
}
/**
 * For an S3 source, when this value is set to true or y, each leading double quotation mark has to be followed by an
 * ending double quotation mark. This formatting complies with RFC 4180. When this value is set to false or n, string
 * literals are copied to the target as is. In this case, a delimiter (row or column) signals the end of the field.
 * Thus, you can't use a delimiter as part of the string, because it signals the end of the value.
 *
 * For an S3 target, an optional parameter used to set behavior to comply with RFC 4180 for data migrated to Amazon S3
 * using .csv file format only. When this value is set to true or y using Amazon S3 as a target, if the data has
 * quotation marks or newline characters in it, DMS encloses the entire column with an additional pair of double
 * quotation marks ("). Every quotation mark within the data is repeated twice.
 *
 * The default value is true. Valid values include true, false, y, and n.
 *
 * @param rfc4180
 *        For an S3 source, when this value is set to true or y, each leading double quotation mark has to be followed
 *        by an ending double quotation mark. This formatting complies with RFC 4180. When this value is set to false
 *        or n, string literals are copied to the target as is. In this case, a delimiter (row or column) signals the
 *        end of the field. Thus, you can't use a delimiter as part of the string, because it signals the end of the
 *        value.
 *
 *        For an S3 target, an optional parameter used to set behavior to comply with RFC 4180 for data migrated to
 *        Amazon S3 using .csv file format only. When this value is set to true or y using Amazon S3 as a target, if
 *        the data has quotation marks or newline characters in it, DMS encloses the entire column with an additional
 *        pair of double quotation marks ("). Every quotation mark within the data is repeated twice.
 *
 *        The default value is true. Valid values include true, false, y, and n.
 * @return Returns a reference to this object so that method calls can be chained together.
 */
public S3Settings withRfc4180(Boolean rfc4180) {
setRfc4180(rfc4180);
return this;
}
/**
*
* For an S3 source, when this value is set to true or y, each leading double quotation mark has to be followed by
* an ending double quotation mark. This formatting complies with RFC 4180. When this value is set to false or n,
* string literals are copied to the target as is. In this case, a delimiter (row or column) signals the end of the
* field. Thus, you can't use a delimiter as part of the string, because it signals the end of the value.
*
* For an S3 target, an optional parameter used to set behavior to comply with RFC 4180 for data migrated to Amazon
* S3 using .csv file format only. When this value is set to true or y using Amazon S3 as a target, if the data has
* quotation marks or newline characters in it, DMS encloses the entire column with an additional pair of double
* quotation marks ("). Every quotation mark within the data is repeated twice.
*
* The default value is true. Valid values include true, false, y, and n.
*
* @return For an S3 source, when this value is set to true or y, each leading double quotation mark has to be
*         followed by an ending double quotation mark. This formatting complies with RFC 4180. When this value is
*         set to false or n, string literals are copied to the target as is. In this case, a delimiter (row or
*         column) signals the end of the field. Thus, you can't use a delimiter as part of the string, because it
*         signals the end of the value.
*
*         For an S3 target, an optional parameter used to set behavior to comply with RFC 4180 for data migrated
*         to Amazon S3 using .csv file format only. When this value is set to true or y using Amazon S3 as a
*         target, if the data has quotation marks or newline characters in it, DMS encloses the entire column with
*         an additional pair of double quotation marks ("). Every quotation mark within the data is repeated twice.
*
*         The default value is true. Valid values include true, false, y, and n.
*/
public Boolean isRfc4180() {
return this.rfc4180;
}
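// Illustrative usage sketch (editor's note, not generated code): one way the Rfc4180 setting might be applied
// when building S3Settings for a CSV target endpoint. The role ARN and bucket name below are hypothetical
// placeholders, not values taken from this file.
//
//     S3Settings csvTargetSettings = new S3Settings()
//             .withServiceAccessRoleArn("arn:aws:iam::123456789012:role/dms-s3-access") // hypothetical role ARN
//             .withBucketName("example-dms-target-bucket")                              // hypothetical bucket
//             .withRfc4180(true); // quote columns containing quotation marks or newlines, per RFC 4180
//
// For an S3 source endpoint, setting the flag to false instead copies string literals as is, so a row or column
// delimiter inside the data would be read as the end of the field.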
/**
*
* When creating an S3 target endpoint, set DatePartitionTimezone to convert the current UTC time into a specified
* time zone. The conversion occurs when a date partition folder is created and a CDC filename is generated. The
* time zone format is Area/Location. Use this parameter when DatePartitionedEnabled is set to true, as shown in
* the following example.
*
* s3-settings='{"DatePartitionEnabled": true, "DatePartitionSequence": "YYYYMMDDHH", "DatePartitionDelimiter": "SLASH", "DatePartitionTimezone":"Asia/Seoul", "BucketName": "dms-nattarat-test"}'
*
* @param datePartitionTimezone
*        When creating an S3 target endpoint, set DatePartitionTimezone to convert the current UTC time into a
*        specified time zone. The conversion occurs when a date partition folder is created and a CDC filename is
*        generated. The time zone format is Area/Location. Use this parameter when DatePartitionedEnabled is set
*        to true, as shown in the following example.
*
*        s3-settings='{"DatePartitionEnabled": true, "DatePartitionSequence": "YYYYMMDDHH", "DatePartitionDelimiter": "SLASH", "DatePartitionTimezone":"Asia/Seoul", "BucketName": "dms-nattarat-test"}'
*/
public void setDatePartitionTimezone(String datePartitionTimezone) {
this.datePartitionTimezone = datePartitionTimezone;
}
/**
*
* When creating an S3 target endpoint, set DatePartitionTimezone to convert the current UTC time into a specified
* time zone. The conversion occurs when a date partition folder is created and a CDC filename is generated. The
* time zone format is Area/Location. Use this parameter when DatePartitionedEnabled is set to true, as shown in
* the following example.
*
* s3-settings='{"DatePartitionEnabled": true, "DatePartitionSequence": "YYYYMMDDHH", "DatePartitionDelimiter": "SLASH", "DatePartitionTimezone":"Asia/Seoul", "BucketName": "dms-nattarat-test"}'
*
* @return When creating an S3 target endpoint, set DatePartitionTimezone to convert the current UTC time into a
*         specified time zone. The conversion occurs when a date partition folder is created and a CDC filename is
*         generated. The time zone format is Area/Location. Use this parameter when DatePartitionedEnabled is set
*         to true, as shown in the following example.
*
*         s3-settings='{"DatePartitionEnabled": true, "DatePartitionSequence": "YYYYMMDDHH", "DatePartitionDelimiter": "SLASH", "DatePartitionTimezone":"Asia/Seoul", "BucketName": "dms-nattarat-test"}'
*/
public String getDatePartitionTimezone() {
return this.datePartitionTimezone;
}
/**
*
* When creating an S3 target endpoint, set DatePartitionTimezone to convert the current UTC time into a specified
* time zone. The conversion occurs when a date partition folder is created and a CDC filename is generated. The
* time zone format is Area/Location. Use this parameter when DatePartitionedEnabled is set to true, as shown in
* the following example.
*
* s3-settings='{"DatePartitionEnabled": true, "DatePartitionSequence": "YYYYMMDDHH", "DatePartitionDelimiter": "SLASH", "DatePartitionTimezone":"Asia/Seoul", "BucketName": "dms-nattarat-test"}'
*
* @param datePartitionTimezone
*        When creating an S3 target endpoint, set DatePartitionTimezone to convert the current UTC time into a
*        specified time zone. The conversion occurs when a date partition folder is created and a CDC filename is
*        generated. The time zone format is Area/Location. Use this parameter when DatePartitionedEnabled is set
*        to true, as shown in the following example.
*
*        s3-settings='{"DatePartitionEnabled": true, "DatePartitionSequence": "YYYYMMDDHH", "DatePartitionDelimiter": "SLASH", "DatePartitionTimezone":"Asia/Seoul", "BucketName": "dms-nattarat-test"}'
* @return Returns a reference to this object so that method calls can be chained together.
*/
public S3Settings withDatePartitionTimezone(String datePartitionTimezone) {
setDatePartitionTimezone(datePartitionTimezone);
return this;
}
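// Illustrative usage sketch (editor's note, not generated code): combining DatePartitionTimezone with the
// date-partitioning settings from the s3-settings example in the Javadoc above. The bucket name is the
// hypothetical one used in that example.
//
//     S3Settings partitionedTarget = new S3Settings()
//             .withBucketName("dms-nattarat-test")
//             .withDatePartitionEnabled(true)
//             .withDatePartitionSequence("YYYYMMDDHH")
//             .withDatePartitionDelimiter("SLASH")
//             .withDatePartitionTimezone("Asia/Seoul"); // Area/Location format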
/**
*
* Use the S3 target endpoint setting AddTrailingPaddingCharacter to add padding on string data. The default value
* is false.
*
* @param addTrailingPaddingCharacter
*        Use the S3 target endpoint setting AddTrailingPaddingCharacter to add padding on string data. The default
*        value is false.
*/
public void setAddTrailingPaddingCharacter(Boolean addTrailingPaddingCharacter) {
this.addTrailingPaddingCharacter = addTrailingPaddingCharacter;
}
/**
*
* Use the S3 target endpoint setting AddTrailingPaddingCharacter to add padding on string data. The default value
* is false.
*
* @return Use the S3 target endpoint setting AddTrailingPaddingCharacter to add padding on string data. The
*         default value is false.
*/
public Boolean getAddTrailingPaddingCharacter() {
return this.addTrailingPaddingCharacter;
}
/**
*
* Use the S3 target endpoint setting AddTrailingPaddingCharacter to add padding on string data. The default value
* is false.
*
* @param addTrailingPaddingCharacter
*        Use the S3 target endpoint setting AddTrailingPaddingCharacter to add padding on string data. The default
*        value is false.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public S3Settings withAddTrailingPaddingCharacter(Boolean addTrailingPaddingCharacter) {
setAddTrailingPaddingCharacter(addTrailingPaddingCharacter);
return this;
}
/**
*
* Use the S3 target endpoint setting AddTrailingPaddingCharacter to add padding on string data. The default value
* is false.
*
* @return Use the S3 target endpoint setting AddTrailingPaddingCharacter to add padding on string data. The
*         default value is false.
*/
public Boolean isAddTrailingPaddingCharacter() {
return this.addTrailingPaddingCharacter;
}
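// Illustrative usage sketch (editor's note, not generated code): enabling trailing padding on string data for a
// hypothetical CSV target. Because the default is false, the setting only needs to be supplied when padding is
// actually wanted.
//
//     S3Settings paddedStringTarget = new S3Settings()
//             .withBucketName("example-dms-target-bucket") // hypothetical bucket
//             .withAddTrailingPaddingCharacter(true);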
/**
*
* To specify a bucket owner and prevent sniping, you can use the ExpectedBucketOwner endpoint setting.
*
* Example: --s3-settings='{"ExpectedBucketOwner": "AWS_Account_ID"}'
*
* When you make a request to test a connection or perform a migration, S3 checks the account ID of the bucket
* owner against the specified parameter.
*
* @param expectedBucketOwner
*        To specify a bucket owner and prevent sniping, you can use the ExpectedBucketOwner endpoint setting.
*
*        Example: --s3-settings='{"ExpectedBucketOwner": "AWS_Account_ID"}'
*
*        When you make a request to test a connection or perform a migration, S3 checks the account ID of the
*        bucket owner against the specified parameter.
*/
public void setExpectedBucketOwner(String expectedBucketOwner) {
this.expectedBucketOwner = expectedBucketOwner;
}
/**
*
* To specify a bucket owner and prevent sniping, you can use the ExpectedBucketOwner endpoint setting.
*
* Example: --s3-settings='{"ExpectedBucketOwner": "AWS_Account_ID"}'
*
* When you make a request to test a connection or perform a migration, S3 checks the account ID of the bucket
* owner against the specified parameter.
*
* @return To specify a bucket owner and prevent sniping, you can use the ExpectedBucketOwner endpoint setting.
*
*         Example: --s3-settings='{"ExpectedBucketOwner": "AWS_Account_ID"}'
*
*         When you make a request to test a connection or perform a migration, S3 checks the account ID of the
*         bucket owner against the specified parameter.
*/
public String getExpectedBucketOwner() {
return this.expectedBucketOwner;
}
/**
*
* To specify a bucket owner and prevent sniping, you can use the ExpectedBucketOwner endpoint setting.
*
* Example: --s3-settings='{"ExpectedBucketOwner": "AWS_Account_ID"}'
*
* When you make a request to test a connection or perform a migration, S3 checks the account ID of the bucket
* owner against the specified parameter.
*
* @param expectedBucketOwner
*        To specify a bucket owner and prevent sniping, you can use the ExpectedBucketOwner endpoint setting.
*
*        Example: --s3-settings='{"ExpectedBucketOwner": "AWS_Account_ID"}'
*
*        When you make a request to test a connection or perform a migration, S3 checks the account ID of the
*        bucket owner against the specified parameter.
* @return Returns a reference to this object so that method calls can be chained together.
*/
*/
public S3Settings withExpectedBucketOwner(String expectedBucketOwner) {
setExpectedBucketOwner(expectedBucketOwner);
return this;
}
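// Illustrative usage sketch (editor's note, not generated code): pinning the expected bucket owner so that
// connection tests and migrations are checked against the owning account. The bucket name and account ID below
// are placeholders.
//
//     S3Settings ownerCheckedTarget = new S3Settings()
//             .withBucketName("example-dms-target-bucket")
//             .withExpectedBucketOwner("123456789012"); // placeholder AWS account ID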
/**
*
* When true, allows Glue to catalog your S3 bucket. Creating a Glue catalog lets you use Athena to query your
* data.
*
*
* @param glueCatalogGeneration
* When true, allows Glue to catalog your S3 bucket. Creating a Glue catalog lets you use Athena to query
* your data.
*/
public void setGlueCatalogGeneration(Boolean glueCatalogGeneration) {
this.glueCatalogGeneration = glueCatalogGeneration;
}
/**
*
* When true, allows Glue to catalog your S3 bucket. Creating a Glue catalog lets you use Athena to query your
* data.
*
*
* @return When true, allows Glue to catalog your S3 bucket. Creating a Glue catalog lets you use Athena to query
* your data.
*/
public Boolean getGlueCatalogGeneration() {
return this.glueCatalogGeneration;
}
/**
*
* When true, allows Glue to catalog your S3 bucket. Creating a Glue catalog lets you use Athena to query your
* data.
*
*
* @param glueCatalogGeneration
* When true, allows Glue to catalog your S3 bucket. Creating a Glue catalog lets you use Athena to query
* your data.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public S3Settings withGlueCatalogGeneration(Boolean glueCatalogGeneration) {
setGlueCatalogGeneration(glueCatalogGeneration);
return this;
}
/**
*
* When true, allows Glue to catalog your S3 bucket. Creating a Glue catalog lets you use Athena to query your
* data.
*
*
* @return When true, allows Glue to catalog your S3 bucket. Creating a Glue catalog lets you use Athena to query
* your data.
*/
public Boolean isGlueCatalogGeneration() {
return this.glueCatalogGeneration;
}
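// Illustrative usage sketch (editor's note, not generated code): asking DMS to generate a Glue catalog for the
// target bucket so the migrated data can be queried with Athena. The bucket name is a placeholder.
//
//     S3Settings glueCatalogedTarget = new S3Settings()
//             .withBucketName("example-dms-target-bucket")
//             .withGlueCatalogGeneration(true);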
/**
* Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be
* redacted from this string using a placeholder value.
*
* @return A string representation of this object.
*
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("{");
if (getServiceAccessRoleArn() != null)
sb.append("ServiceAccessRoleArn: ").append(getServiceAccessRoleArn()).append(",");
if (getExternalTableDefinition() != null)
sb.append("ExternalTableDefinition: ").append(getExternalTableDefinition()).append(",");
if (getCsvRowDelimiter() != null)
sb.append("CsvRowDelimiter: ").append(getCsvRowDelimiter()).append(",");
if (getCsvDelimiter() != null)
sb.append("CsvDelimiter: ").append(getCsvDelimiter()).append(",");
if (getBucketFolder() != null)
sb.append("BucketFolder: ").append(getBucketFolder()).append(",");
if (getBucketName() != null)
sb.append("BucketName: ").append(getBucketName()).append(",");
if (getCompressionType() != null)
sb.append("CompressionType: ").append(getCompressionType()).append(",");
if (getEncryptionMode() != null)
sb.append("EncryptionMode: ").append(getEncryptionMode()).append(",");
if (getServerSideEncryptionKmsKeyId() != null)
sb.append("ServerSideEncryptionKmsKeyId: ").append(getServerSideEncryptionKmsKeyId()).append(",");
if (getDataFormat() != null)
sb.append("DataFormat: ").append(getDataFormat()).append(",");
if (getEncodingType() != null)
sb.append("EncodingType: ").append(getEncodingType()).append(",");
if (getDictPageSizeLimit() != null)
sb.append("DictPageSizeLimit: ").append(getDictPageSizeLimit()).append(",");
if (getRowGroupLength() != null)
sb.append("RowGroupLength: ").append(getRowGroupLength()).append(",");
if (getDataPageSize() != null)
sb.append("DataPageSize: ").append(getDataPageSize()).append(",");
if (getParquetVersion() != null)
sb.append("ParquetVersion: ").append(getParquetVersion()).append(",");
if (getEnableStatistics() != null)
sb.append("EnableStatistics: ").append(getEnableStatistics()).append(",");
if (getIncludeOpForFullLoad() != null)
sb.append("IncludeOpForFullLoad: ").append(getIncludeOpForFullLoad()).append(",");
if (getCdcInsertsOnly() != null)
sb.append("CdcInsertsOnly: ").append(getCdcInsertsOnly()).append(",");
if (getTimestampColumnName() != null)
sb.append("TimestampColumnName: ").append(getTimestampColumnName()).append(",");
if (getParquetTimestampInMillisecond() != null)
sb.append("ParquetTimestampInMillisecond: ").append(getParquetTimestampInMillisecond()).append(",");
if (getCdcInsertsAndUpdates() != null)
sb.append("CdcInsertsAndUpdates: ").append(getCdcInsertsAndUpdates()).append(",");
if (getDatePartitionEnabled() != null)
sb.append("DatePartitionEnabled: ").append(getDatePartitionEnabled()).append(",");
if (getDatePartitionSequence() != null)
sb.append("DatePartitionSequence: ").append(getDatePartitionSequence()).append(",");
if (getDatePartitionDelimiter() != null)
sb.append("DatePartitionDelimiter: ").append(getDatePartitionDelimiter()).append(",");
if (getUseCsvNoSupValue() != null)
sb.append("UseCsvNoSupValue: ").append(getUseCsvNoSupValue()).append(",");
if (getCsvNoSupValue() != null)
sb.append("CsvNoSupValue: ").append(getCsvNoSupValue()).append(",");
if (getPreserveTransactions() != null)
sb.append("PreserveTransactions: ").append(getPreserveTransactions()).append(",");
if (getCdcPath() != null)
sb.append("CdcPath: ").append(getCdcPath()).append(",");
if (getUseTaskStartTimeForFullLoadTimestamp() != null)
sb.append("UseTaskStartTimeForFullLoadTimestamp: ").append(getUseTaskStartTimeForFullLoadTimestamp()).append(",");
if (getCannedAclForObjects() != null)
sb.append("CannedAclForObjects: ").append(getCannedAclForObjects()).append(",");
if (getAddColumnName() != null)
sb.append("AddColumnName: ").append(getAddColumnName()).append(",");
if (getCdcMaxBatchInterval() != null)
sb.append("CdcMaxBatchInterval: ").append(getCdcMaxBatchInterval()).append(",");
if (getCdcMinFileSize() != null)
sb.append("CdcMinFileSize: ").append(getCdcMinFileSize()).append(",");
if (getCsvNullValue() != null)
sb.append("CsvNullValue: ").append(getCsvNullValue()).append(",");
if (getIgnoreHeaderRows() != null)
sb.append("IgnoreHeaderRows: ").append(getIgnoreHeaderRows()).append(",");
if (getMaxFileSize() != null)
sb.append("MaxFileSize: ").append(getMaxFileSize()).append(",");
if (getRfc4180() != null)
sb.append("Rfc4180: ").append(getRfc4180()).append(",");
if (getDatePartitionTimezone() != null)
sb.append("DatePartitionTimezone: ").append(getDatePartitionTimezone()).append(",");
if (getAddTrailingPaddingCharacter() != null)
sb.append("AddTrailingPaddingCharacter: ").append(getAddTrailingPaddingCharacter()).append(",");
if (getExpectedBucketOwner() != null)
sb.append("ExpectedBucketOwner: ").append(getExpectedBucketOwner()).append(",");
if (getGlueCatalogGeneration() != null)
sb.append("GlueCatalogGeneration: ").append(getGlueCatalogGeneration());
sb.append("}");
return sb.toString();
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (obj instanceof S3Settings == false)
return false;
S3Settings other = (S3Settings) obj;
if (other.getServiceAccessRoleArn() == null ^ this.getServiceAccessRoleArn() == null)
return false;
if (other.getServiceAccessRoleArn() != null && other.getServiceAccessRoleArn().equals(this.getServiceAccessRoleArn()) == false)
return false;
if (other.getExternalTableDefinition() == null ^ this.getExternalTableDefinition() == null)
return false;
if (other.getExternalTableDefinition() != null && other.getExternalTableDefinition().equals(this.getExternalTableDefinition()) == false)
return false;
if (other.getCsvRowDelimiter() == null ^ this.getCsvRowDelimiter() == null)
return false;
if (other.getCsvRowDelimiter() != null && other.getCsvRowDelimiter().equals(this.getCsvRowDelimiter()) == false)
return false;
if (other.getCsvDelimiter() == null ^ this.getCsvDelimiter() == null)
return false;
if (other.getCsvDelimiter() != null && other.getCsvDelimiter().equals(this.getCsvDelimiter()) == false)
return false;
if (other.getBucketFolder() == null ^ this.getBucketFolder() == null)
return false;
if (other.getBucketFolder() != null && other.getBucketFolder().equals(this.getBucketFolder()) == false)
return false;
if (other.getBucketName() == null ^ this.getBucketName() == null)
return false;
if (other.getBucketName() != null && other.getBucketName().equals(this.getBucketName()) == false)
return false;
if (other.getCompressionType() == null ^ this.getCompressionType() == null)
return false;
if (other.getCompressionType() != null && other.getCompressionType().equals(this.getCompressionType()) == false)
return false;
if (other.getEncryptionMode() == null ^ this.getEncryptionMode() == null)
return false;
if (other.getEncryptionMode() != null && other.getEncryptionMode().equals(this.getEncryptionMode()) == false)
return false;
if (other.getServerSideEncryptionKmsKeyId() == null ^ this.getServerSideEncryptionKmsKeyId() == null)
return false;
if (other.getServerSideEncryptionKmsKeyId() != null && other.getServerSideEncryptionKmsKeyId().equals(this.getServerSideEncryptionKmsKeyId()) == false)
return false;
if (other.getDataFormat() == null ^ this.getDataFormat() == null)
return false;
if (other.getDataFormat() != null && other.getDataFormat().equals(this.getDataFormat()) == false)
return false;
if (other.getEncodingType() == null ^ this.getEncodingType() == null)
return false;
if (other.getEncodingType() != null && other.getEncodingType().equals(this.getEncodingType()) == false)
return false;
if (other.getDictPageSizeLimit() == null ^ this.getDictPageSizeLimit() == null)
return false;
if (other.getDictPageSizeLimit() != null && other.getDictPageSizeLimit().equals(this.getDictPageSizeLimit()) == false)
return false;
if (other.getRowGroupLength() == null ^ this.getRowGroupLength() == null)
return false;
if (other.getRowGroupLength() != null && other.getRowGroupLength().equals(this.getRowGroupLength()) == false)
return false;
if (other.getDataPageSize() == null ^ this.getDataPageSize() == null)
return false;
if (other.getDataPageSize() != null && other.getDataPageSize().equals(this.getDataPageSize()) == false)
return false;
if (other.getParquetVersion() == null ^ this.getParquetVersion() == null)
return false;
if (other.getParquetVersion() != null && other.getParquetVersion().equals(this.getParquetVersion()) == false)
return false;
if (other.getEnableStatistics() == null ^ this.getEnableStatistics() == null)
return false;
if (other.getEnableStatistics() != null && other.getEnableStatistics().equals(this.getEnableStatistics()) == false)
return false;
if (other.getIncludeOpForFullLoad() == null ^ this.getIncludeOpForFullLoad() == null)
return false;
if (other.getIncludeOpForFullLoad() != null && other.getIncludeOpForFullLoad().equals(this.getIncludeOpForFullLoad()) == false)
return false;
if (other.getCdcInsertsOnly() == null ^ this.getCdcInsertsOnly() == null)
return false;
if (other.getCdcInsertsOnly() != null && other.getCdcInsertsOnly().equals(this.getCdcInsertsOnly()) == false)
return false;
if (other.getTimestampColumnName() == null ^ this.getTimestampColumnName() == null)
return false;
if (other.getTimestampColumnName() != null && other.getTimestampColumnName().equals(this.getTimestampColumnName()) == false)
return false;
if (other.getParquetTimestampInMillisecond() == null ^ this.getParquetTimestampInMillisecond() == null)
return false;
if (other.getParquetTimestampInMillisecond() != null
&& other.getParquetTimestampInMillisecond().equals(this.getParquetTimestampInMillisecond()) == false)
return false;
if (other.getCdcInsertsAndUpdates() == null ^ this.getCdcInsertsAndUpdates() == null)
return false;
if (other.getCdcInsertsAndUpdates() != null && other.getCdcInsertsAndUpdates().equals(this.getCdcInsertsAndUpdates()) == false)
return false;
if (other.getDatePartitionEnabled() == null ^ this.getDatePartitionEnabled() == null)
return false;
if (other.getDatePartitionEnabled() != null && other.getDatePartitionEnabled().equals(this.getDatePartitionEnabled()) == false)
return false;
if (other.getDatePartitionSequence() == null ^ this.getDatePartitionSequence() == null)
return false;
if (other.getDatePartitionSequence() != null && other.getDatePartitionSequence().equals(this.getDatePartitionSequence()) == false)
return false;
if (other.getDatePartitionDelimiter() == null ^ this.getDatePartitionDelimiter() == null)
return false;
if (other.getDatePartitionDelimiter() != null && other.getDatePartitionDelimiter().equals(this.getDatePartitionDelimiter()) == false)
return false;
if (other.getUseCsvNoSupValue() == null ^ this.getUseCsvNoSupValue() == null)
return false;
if (other.getUseCsvNoSupValue() != null && other.getUseCsvNoSupValue().equals(this.getUseCsvNoSupValue()) == false)
return false;
if (other.getCsvNoSupValue() == null ^ this.getCsvNoSupValue() == null)
return false;
if (other.getCsvNoSupValue() != null && other.getCsvNoSupValue().equals(this.getCsvNoSupValue()) == false)
return false;
if (other.getPreserveTransactions() == null ^ this.getPreserveTransactions() == null)
return false;
if (other.getPreserveTransactions() != null && other.getPreserveTransactions().equals(this.getPreserveTransactions()) == false)
return false;
if (other.getCdcPath() == null ^ this.getCdcPath() == null)
return false;
if (other.getCdcPath() != null && other.getCdcPath().equals(this.getCdcPath()) == false)
return false;
if (other.getUseTaskStartTimeForFullLoadTimestamp() == null ^ this.getUseTaskStartTimeForFullLoadTimestamp() == null)
return false;
if (other.getUseTaskStartTimeForFullLoadTimestamp() != null
&& other.getUseTaskStartTimeForFullLoadTimestamp().equals(this.getUseTaskStartTimeForFullLoadTimestamp()) == false)
return false;
if (other.getCannedAclForObjects() == null ^ this.getCannedAclForObjects() == null)
return false;
if (other.getCannedAclForObjects() != null && other.getCannedAclForObjects().equals(this.getCannedAclForObjects()) == false)
return false;
if (other.getAddColumnName() == null ^ this.getAddColumnName() == null)
return false;
if (other.getAddColumnName() != null && other.getAddColumnName().equals(this.getAddColumnName()) == false)
return false;
if (other.getCdcMaxBatchInterval() == null ^ this.getCdcMaxBatchInterval() == null)
return false;
if (other.getCdcMaxBatchInterval() != null && other.getCdcMaxBatchInterval().equals(this.getCdcMaxBatchInterval()) == false)
return false;
if (other.getCdcMinFileSize() == null ^ this.getCdcMinFileSize() == null)
return false;
if (other.getCdcMinFileSize() != null && other.getCdcMinFileSize().equals(this.getCdcMinFileSize()) == false)
return false;
if (other.getCsvNullValue() == null ^ this.getCsvNullValue() == null)
return false;
if (other.getCsvNullValue() != null && other.getCsvNullValue().equals(this.getCsvNullValue()) == false)
return false;
if (other.getIgnoreHeaderRows() == null ^ this.getIgnoreHeaderRows() == null)
return false;
if (other.getIgnoreHeaderRows() != null && other.getIgnoreHeaderRows().equals(this.getIgnoreHeaderRows()) == false)
return false;
if (other.getMaxFileSize() == null ^ this.getMaxFileSize() == null)
return false;
if (other.getMaxFileSize() != null && other.getMaxFileSize().equals(this.getMaxFileSize()) == false)
return false;
if (other.getRfc4180() == null ^ this.getRfc4180() == null)
return false;
if (other.getRfc4180() != null && other.getRfc4180().equals(this.getRfc4180()) == false)
return false;
if (other.getDatePartitionTimezone() == null ^ this.getDatePartitionTimezone() == null)
return false;
if (other.getDatePartitionTimezone() != null && other.getDatePartitionTimezone().equals(this.getDatePartitionTimezone()) == false)
return false;
if (other.getAddTrailingPaddingCharacter() == null ^ this.getAddTrailingPaddingCharacter() == null)
return false;
if (other.getAddTrailingPaddingCharacter() != null && other.getAddTrailingPaddingCharacter().equals(this.getAddTrailingPaddingCharacter()) == false)
return false;
if (other.getExpectedBucketOwner() == null ^ this.getExpectedBucketOwner() == null)
return false;
if (other.getExpectedBucketOwner() != null && other.getExpectedBucketOwner().equals(this.getExpectedBucketOwner()) == false)
return false;
if (other.getGlueCatalogGeneration() == null ^ this.getGlueCatalogGeneration() == null)
return false;
if (other.getGlueCatalogGeneration() != null && other.getGlueCatalogGeneration().equals(this.getGlueCatalogGeneration()) == false)
return false;
return true;
}
@Override
public int hashCode() {
final int prime = 31;
int hashCode = 1;
hashCode = prime * hashCode + ((getServiceAccessRoleArn() == null) ? 0 : getServiceAccessRoleArn().hashCode());
hashCode = prime * hashCode + ((getExternalTableDefinition() == null) ? 0 : getExternalTableDefinition().hashCode());
hashCode = prime * hashCode + ((getCsvRowDelimiter() == null) ? 0 : getCsvRowDelimiter().hashCode());
hashCode = prime * hashCode + ((getCsvDelimiter() == null) ? 0 : getCsvDelimiter().hashCode());
hashCode = prime * hashCode + ((getBucketFolder() == null) ? 0 : getBucketFolder().hashCode());
hashCode = prime * hashCode + ((getBucketName() == null) ? 0 : getBucketName().hashCode());
hashCode = prime * hashCode + ((getCompressionType() == null) ? 0 : getCompressionType().hashCode());
hashCode = prime * hashCode + ((getEncryptionMode() == null) ? 0 : getEncryptionMode().hashCode());
hashCode = prime * hashCode + ((getServerSideEncryptionKmsKeyId() == null) ? 0 : getServerSideEncryptionKmsKeyId().hashCode());
hashCode = prime * hashCode + ((getDataFormat() == null) ? 0 : getDataFormat().hashCode());
hashCode = prime * hashCode + ((getEncodingType() == null) ? 0 : getEncodingType().hashCode());
hashCode = prime * hashCode + ((getDictPageSizeLimit() == null) ? 0 : getDictPageSizeLimit().hashCode());
hashCode = prime * hashCode + ((getRowGroupLength() == null) ? 0 : getRowGroupLength().hashCode());
hashCode = prime * hashCode + ((getDataPageSize() == null) ? 0 : getDataPageSize().hashCode());
hashCode = prime * hashCode + ((getParquetVersion() == null) ? 0 : getParquetVersion().hashCode());
hashCode = prime * hashCode + ((getEnableStatistics() == null) ? 0 : getEnableStatistics().hashCode());
hashCode = prime * hashCode + ((getIncludeOpForFullLoad() == null) ? 0 : getIncludeOpForFullLoad().hashCode());
hashCode = prime * hashCode + ((getCdcInsertsOnly() == null) ? 0 : getCdcInsertsOnly().hashCode());
hashCode = prime * hashCode + ((getTimestampColumnName() == null) ? 0 : getTimestampColumnName().hashCode());
hashCode = prime * hashCode + ((getParquetTimestampInMillisecond() == null) ? 0 : getParquetTimestampInMillisecond().hashCode());
hashCode = prime * hashCode + ((getCdcInsertsAndUpdates() == null) ? 0 : getCdcInsertsAndUpdates().hashCode());
hashCode = prime * hashCode + ((getDatePartitionEnabled() == null) ? 0 : getDatePartitionEnabled().hashCode());
hashCode = prime * hashCode + ((getDatePartitionSequence() == null) ? 0 : getDatePartitionSequence().hashCode());
hashCode = prime * hashCode + ((getDatePartitionDelimiter() == null) ? 0 : getDatePartitionDelimiter().hashCode());
hashCode = prime * hashCode + ((getUseCsvNoSupValue() == null) ? 0 : getUseCsvNoSupValue().hashCode());
hashCode = prime * hashCode + ((getCsvNoSupValue() == null) ? 0 : getCsvNoSupValue().hashCode());
hashCode = prime * hashCode + ((getPreserveTransactions() == null) ? 0 : getPreserveTransactions().hashCode());
hashCode = prime * hashCode + ((getCdcPath() == null) ? 0 : getCdcPath().hashCode());
hashCode = prime * hashCode + ((getUseTaskStartTimeForFullLoadTimestamp() == null) ? 0 : getUseTaskStartTimeForFullLoadTimestamp().hashCode());
hashCode = prime * hashCode + ((getCannedAclForObjects() == null) ? 0 : getCannedAclForObjects().hashCode());
hashCode = prime * hashCode + ((getAddColumnName() == null) ? 0 : getAddColumnName().hashCode());
hashCode = prime * hashCode + ((getCdcMaxBatchInterval() == null) ? 0 : getCdcMaxBatchInterval().hashCode());
hashCode = prime * hashCode + ((getCdcMinFileSize() == null) ? 0 : getCdcMinFileSize().hashCode());
hashCode = prime * hashCode + ((getCsvNullValue() == null) ? 0 : getCsvNullValue().hashCode());
hashCode = prime * hashCode + ((getIgnoreHeaderRows() == null) ? 0 : getIgnoreHeaderRows().hashCode());
hashCode = prime * hashCode + ((getMaxFileSize() == null) ? 0 : getMaxFileSize().hashCode());
hashCode = prime * hashCode + ((getRfc4180() == null) ? 0 : getRfc4180().hashCode());
hashCode = prime * hashCode + ((getDatePartitionTimezone() == null) ? 0 : getDatePartitionTimezone().hashCode());
hashCode = prime * hashCode + ((getAddTrailingPaddingCharacter() == null) ? 0 : getAddTrailingPaddingCharacter().hashCode());
hashCode = prime * hashCode + ((getExpectedBucketOwner() == null) ? 0 : getExpectedBucketOwner().hashCode());
hashCode = prime * hashCode + ((getGlueCatalogGeneration() == null) ? 0 : getGlueCatalogGeneration().hashCode());
return hashCode;
}
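// Illustrative usage sketch (editor's note, not generated code): because equals() and hashCode() above compare
// every field by value, two S3Settings instances built with the same values are interchangeable, for example
// when deduplicating settings in a HashSet. The bucket name is a placeholder.
//
//     S3Settings a = new S3Settings().withBucketName("example-bucket").withRfc4180(true);
//     S3Settings b = new S3Settings().withBucketName("example-bucket").withRfc4180(true);
//     boolean sameValue = a.equals(b) && a.hashCode() == b.hashCode(); // true
//     java.util.Set<S3Settings> unique = new java.util.HashSet<>(java.util.Arrays.asList(a, b)); // size 1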
@Override
public S3Settings clone() {
try {
return (S3Settings) super.clone();
} catch (CloneNotSupportedException e) {
throw new IllegalStateException("Got a CloneNotSupportedException from Object.clone() " + "even though we're Cloneable!", e);
}
}
@com.amazonaws.annotation.SdkInternalApi
@Override
public void marshall(ProtocolMarshaller protocolMarshaller) {
com.amazonaws.services.databasemigrationservice.model.transform.S3SettingsMarshaller.getInstance().marshall(this, protocolMarshaller);
}
}