
/*
* Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.databasemigrationservice.model;
import java.io.Serializable;
import javax.annotation.Generated;
import com.amazonaws.protocol.StructuredPojo;
import com.amazonaws.protocol.ProtocolMarshaller;
/**
*
* Settings for exporting data to Amazon S3.
*
*
* @see AWS API Documentation
*/
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class S3Settings implements Serializable, Cloneable, StructuredPojo {
/**
*
* The Amazon Resource Name (ARN) used by the service access IAM role.
*
*/
private String serviceAccessRoleArn;
/**
*
* The external table definition.
*
*/
private String externalTableDefinition;
/**
*
* The delimiter used to separate rows in the source files. The default is a carriage return (\n).
*
*/
private String csvRowDelimiter;
/**
*
* The delimiter used to separate columns in the source files. The default is a comma.
*
*/
private String csvDelimiter;
/**
*
* An optional parameter to set a folder name in the S3 bucket. If provided, tables are created in the path
* bucketFolder/schema_name/table_name/. If this parameter isn't specified, then the path used is
* schema_name/table_name/.
*
*/
private String bucketFolder;
/**
*
* The name of the S3 bucket.
*
*/
private String bucketName;
/**
*
* An optional parameter to use GZIP to compress the target files. Set to GZIP to compress the target files. Either
* set this parameter to NONE (the default) or don't use it to leave the files uncompressed. This parameter applies
* to both .csv and .parquet file formats.
*
*/
private String compressionType;
/**
*
* The type of server-side encryption that you want to use for your data. This encryption type is part of the
* endpoint settings or the extra connections attributes for Amazon S3. You can choose either SSE_S3 (the default)
* or SSE_KMS. To use SSE_S3, you need an AWS Identity and Access Management (IAM) role with permission to allow
* "arn:aws:s3:::dms-*" to use the following actions:
*
* - s3:CreateBucket
* - s3:ListBucket
* - s3:DeleteBucket
* - s3:GetBucketLocation
* - s3:GetObject
* - s3:PutObject
* - s3:DeleteObject
* - s3:GetObjectVersion
* - s3:GetBucketPolicy
* - s3:PutBucketPolicy
* - s3:DeleteBucketPolicy
*/
private String encryptionMode;
/**
*
* If you are using SSE_KMS for the EncryptionMode, provide the AWS KMS key ID. The key that you use needs an
* attached policy that enables AWS Identity and Access Management (IAM) user permissions and allows use of the key.
*
* Here is a CLI example:
* aws dms create-endpoint --endpoint-identifier value --endpoint-type target --engine-name s3 --s3-settings ServiceAccessRoleArn=value,BucketFolder=value,BucketName=value,EncryptionMode=SSE_KMS,ServerSideEncryptionKmsKeyId=value
*
*/
private String serverSideEncryptionKmsKeyId;
/**
*
* The format of the data that you want to use for output. You can choose one of the following:
*
* - csv: This is a row-based file format with comma-separated values (.csv).
* - parquet: Apache Parquet (.parquet) is a columnar storage file format that features efficient compression and
* provides faster query response.
*/
private String dataFormat;
/**
*
* The type of encoding you are using:
*
* - RLE_DICTIONARY uses a combination of bit-packing and run-length encoding to store repeated values more
* efficiently. This is the default.
* - PLAIN doesn't use encoding at all. Values are stored as they are.
* - PLAIN_DICTIONARY builds a dictionary of the values encountered in a given column. The dictionary is stored in
* a dictionary page for each column chunk.
*/
private String encodingType;
/**
*
* The maximum size of an encoded dictionary page of a column. If the dictionary page exceeds this, this column is
* stored using an encoding type of PLAIN. This parameter defaults to 1024 * 1024 bytes (1 MiB), the maximum size
* of a dictionary page before it reverts to PLAIN encoding. This size is used for .parquet file format only.
*
*/
private Integer dictPageSizeLimit;
/**
*
* The number of rows in a row group. A smaller row group size provides faster reads. But as the number of row
* groups grows, the slower writes become. This parameter defaults to 10,000 rows. This number is used for .parquet
* file format only.
*
*
* If you choose a value larger than the maximum, RowGroupLength is set to the max row group length in bytes
* (64 * 1024 * 1024).
*
*/
private Integer rowGroupLength;
/**
*
* The size of one data page in bytes. This parameter defaults to 1024 * 1024 bytes (1 MiB). This number is used for
* .parquet file format only.
*
*/
private Integer dataPageSize;
/**
*
* The version of the Apache Parquet format that you want to use: parquet_1_0 (the default) or parquet_2_0.
*
*/
private String parquetVersion;
/**
*
* A value that enables statistics for Parquet pages and row groups. Choose true to enable statistics, false to
* disable. Statistics include NULL, DISTINCT, MAX, and MIN values. This parameter defaults to true. This value is
* used for .parquet file format only.
*
*/
private Boolean enableStatistics;
/**
*
* A value that enables a full load to write INSERT operations to the comma-separated value (.csv) output files
* only to indicate how the rows were added to the source database.
*
* AWS DMS supports the IncludeOpForFullLoad parameter in versions 3.1.4 and later.
*
* For full load, records can only be inserted. By default (the false setting), no information is recorded in
* these output files for a full load to indicate that the rows were inserted at the source database. If
* IncludeOpForFullLoad is set to true or y, the INSERT is recorded as an I annotation in the first field of the
* .csv file. This allows the format of your target records from a full load to be consistent with the target
* records from a CDC load.
*
* This setting works together with the CdcInsertsOnly and the CdcInsertsAndUpdates parameters for output to .csv
* files only. For more information about how these settings work together, see Indicating Source DB Operations in
* Migrated S3 Data in the AWS Database Migration Service User Guide.
*
*
*/
private Boolean includeOpForFullLoad;
/**
*
* A value that enables a change data capture (CDC) load to write only INSERT operations to .csv or columnar
* storage (.parquet) output files. By default (the false setting), the first field in a .csv or .parquet record
* contains the letter I (INSERT), U (UPDATE), or D (DELETE). These values indicate whether the row was inserted,
* updated, or deleted at the source database for a CDC load to the target.
*
* If CdcInsertsOnly is set to true or y, only INSERTs from the source database are migrated to the .csv or
* .parquet file. For .csv format only, how these INSERTs are recorded depends on the value of
* IncludeOpForFullLoad. If IncludeOpForFullLoad is set to true, the first field of every CDC record is set to I
* to indicate the INSERT operation at the source. If IncludeOpForFullLoad is set to false, every CDC record is
* written without a first field to indicate the INSERT operation at the source. For more information about how
* these settings work together, see Indicating Source DB Operations in Migrated S3 Data in the AWS Database
* Migration Service User Guide.
*
* AWS DMS supports the interaction described preceding between the CdcInsertsOnly and IncludeOpForFullLoad
* parameters in versions 3.1.4 and later.
*
* CdcInsertsOnly and CdcInsertsAndUpdates can't both be set to true for the same endpoint. Set either
* CdcInsertsOnly or CdcInsertsAndUpdates to true for the same endpoint, but not both.
*
*
*/
private Boolean cdcInsertsOnly;
/**
*
* A value that when nonblank causes AWS DMS to add a column with timestamp information to the endpoint data for an
* Amazon S3 target.
*
* AWS DMS supports the TimestampColumnName parameter in versions 3.1.4 and later.
*
* DMS includes an additional STRING column in the .csv or .parquet object files of your migrated data when you
* set TimestampColumnName to a nonblank value.
*
* For a full load, each row of this timestamp column contains a timestamp for when the data was transferred from
* the source to the target by DMS.
*
* For a change data capture (CDC) load, each row of the timestamp column contains the timestamp for the commit of
* that row in the source database.
*
* The string format for this timestamp column value is yyyy-MM-dd HH:mm:ss.SSSSSS. By default, the precision of
* this value is in microseconds. For a CDC load, the rounding of the precision depends on the commit timestamp
* supported by DMS for the source database.
*
* When the AddColumnName parameter is set to true, DMS also includes a name for the timestamp column that you set
* with TimestampColumnName.
*
*/
private String timestampColumnName;
/**
*
* A value that specifies the precision of any TIMESTAMP column values that are written to an Amazon S3 object
* file in .parquet format.
*
* AWS DMS supports the ParquetTimestampInMillisecond parameter in versions 3.1.4 and later.
*
* When ParquetTimestampInMillisecond is set to true or y, AWS DMS writes all TIMESTAMP columns in a .parquet
* formatted file with millisecond precision. Otherwise, DMS writes them with microsecond precision.
*
* Currently, Amazon Athena and AWS Glue can handle only millisecond precision for TIMESTAMP values. Set this
* parameter to true for S3 endpoint object files that are .parquet formatted only if you plan to query or process
* the data with Athena or AWS Glue.
*
* AWS DMS writes any TIMESTAMP column values written to an S3 file in .csv format with microsecond precision.
*
* Setting ParquetTimestampInMillisecond has no effect on the string format of the timestamp column value that is
* inserted by setting the TimestampColumnName parameter.
*
*
*/
private Boolean parquetTimestampInMillisecond;
/**
*
* A value that enables a change data capture (CDC) load to write INSERT and UPDATE operations to .csv or .parquet
* (columnar storage) output files. The default setting is false, but when CdcInsertsAndUpdates is set to true or
* y, INSERTs and UPDATEs from the source database are migrated to the .csv or .parquet file.
*
* For .csv file format only, how these INSERTs and UPDATEs are recorded depends on the value of the
* IncludeOpForFullLoad parameter. If IncludeOpForFullLoad is set to true, the first field of every CDC record is
* set to either I or U to indicate INSERT and UPDATE operations at the source. But if IncludeOpForFullLoad is set
* to false, CDC records are written without an indication of INSERT or UPDATE operations at the source. For more
* information about how these settings work together, see Indicating Source DB Operations in Migrated S3 Data in
* the AWS Database Migration Service User Guide.
*
* AWS DMS supports the use of the CdcInsertsAndUpdates parameter in versions 3.3.1 and later.
*
* CdcInsertsOnly and CdcInsertsAndUpdates can't both be set to true for the same endpoint. Set either
* CdcInsertsOnly or CdcInsertsAndUpdates to true for the same endpoint, but not both.
*
*
*/
private Boolean cdcInsertsAndUpdates;
/**
*
* The Amazon Resource Name (ARN) used by the service access IAM role.
*
*
* @param serviceAccessRoleArn
* The Amazon Resource Name (ARN) used by the service access IAM role.
*/
public void setServiceAccessRoleArn(String serviceAccessRoleArn) {
this.serviceAccessRoleArn = serviceAccessRoleArn;
}
/**
*
* The Amazon Resource Name (ARN) used by the service access IAM role.
*
*
* @return The Amazon Resource Name (ARN) used by the service access IAM role.
*/
public String getServiceAccessRoleArn() {
return this.serviceAccessRoleArn;
}
/**
*
* The Amazon Resource Name (ARN) used by the service access IAM role.
*
*
* @param serviceAccessRoleArn
* The Amazon Resource Name (ARN) used by the service access IAM role.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public S3Settings withServiceAccessRoleArn(String serviceAccessRoleArn) {
setServiceAccessRoleArn(serviceAccessRoleArn);
return this;
}
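// Illustrative usage sketch (not part of the generated SDK source): because every "with" setter returns this
// object, an S3 target's basic settings can be configured in a single fluent chain. The role ARN, bucket name,
// and folder below are placeholders.
//
//   S3Settings settings = new S3Settings()
//           .withServiceAccessRoleArn("arn:aws:iam::123456789012:role/dms-s3-access") // placeholder ARN
//           .withBucketName("my-dms-target-bucket")                                   // placeholder bucket
//           .withBucketFolder("dms-output");                                          // placeholder folder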
/**
*
* The external table definition.
*
*
* @param externalTableDefinition
* The external table definition.
*/
public void setExternalTableDefinition(String externalTableDefinition) {
this.externalTableDefinition = externalTableDefinition;
}
/**
*
* The external table definition.
*
*
* @return The external table definition.
*/
public String getExternalTableDefinition() {
return this.externalTableDefinition;
}
/**
*
* The external table definition.
*
*
* @param externalTableDefinition
* The external table definition.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public S3Settings withExternalTableDefinition(String externalTableDefinition) {
setExternalTableDefinition(externalTableDefinition);
return this;
}
/**
*
* The delimiter used to separate rows in the source files. The default is a carriage return (\n).
*
* @param csvRowDelimiter
* The delimiter used to separate rows in the source files. The default is a carriage return (\n).
*/
public void setCsvRowDelimiter(String csvRowDelimiter) {
this.csvRowDelimiter = csvRowDelimiter;
}
/**
*
* The delimiter used to separate rows in the source files. The default is a carriage return (\n).
*
* @return The delimiter used to separate rows in the source files. The default is a carriage return (\n).
*/
public String getCsvRowDelimiter() {
return this.csvRowDelimiter;
}
/**
*
* The delimiter used to separate rows in the source files. The default is a carriage return (\n).
*
* @param csvRowDelimiter
* The delimiter used to separate rows in the source files. The default is a carriage return (\n).
* @return Returns a reference to this object so that method calls can be chained together.
*/
public S3Settings withCsvRowDelimiter(String csvRowDelimiter) {
setCsvRowDelimiter(csvRowDelimiter);
return this;
}
/**
*
* The delimiter used to separate columns in the source files. The default is a comma.
*
*
* @param csvDelimiter
* The delimiter used to separate columns in the source files. The default is a comma.
*/
public void setCsvDelimiter(String csvDelimiter) {
this.csvDelimiter = csvDelimiter;
}
/**
*
* The delimiter used to separate columns in the source files. The default is a comma.
*
*
* @return The delimiter used to separate columns in the source files. The default is a comma.
*/
public String getCsvDelimiter() {
return this.csvDelimiter;
}
/**
*
* The delimiter used to separate columns in the source files. The default is a comma.
*
*
* @param csvDelimiter
* The delimiter used to separate columns in the source files. The default is a comma.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public S3Settings withCsvDelimiter(String csvDelimiter) {
setCsvDelimiter(csvDelimiter);
return this;
}
/**
*
* An optional parameter to set a folder name in the S3 bucket. If provided, tables are created in the path
* bucketFolder/schema_name/table_name/. If this parameter isn't specified, then the path used is
* schema_name/table_name/.
*
* @param bucketFolder
* An optional parameter to set a folder name in the S3 bucket. If provided, tables are created in the path
* bucketFolder/schema_name/table_name/. If this parameter isn't specified, then the path used is
* schema_name/table_name/.
*/
public void setBucketFolder(String bucketFolder) {
this.bucketFolder = bucketFolder;
}
/**
*
* An optional parameter to set a folder name in the S3 bucket. If provided, tables are created in the path
* bucketFolder/schema_name/table_name/. If this parameter isn't specified, then the path used is
* schema_name/table_name/.
*
* @return An optional parameter to set a folder name in the S3 bucket. If provided, tables are created in the path
* bucketFolder/schema_name/table_name/. If this parameter isn't specified, then the path used is
* schema_name/table_name/.
*/
public String getBucketFolder() {
return this.bucketFolder;
}
/**
*
* An optional parameter to set a folder name in the S3 bucket. If provided, tables are created in the path
* bucketFolder/schema_name/table_name/. If this parameter isn't specified, then the path used is
* schema_name/table_name/.
*
* @param bucketFolder
* An optional parameter to set a folder name in the S3 bucket. If provided, tables are created in the path
* bucketFolder/schema_name/table_name/. If this parameter isn't specified, then the path used is
* schema_name/table_name/.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public S3Settings withBucketFolder(String bucketFolder) {
setBucketFolder(bucketFolder);
return this;
}
/**
*
* The name of the S3 bucket.
*
*
* @param bucketName
* The name of the S3 bucket.
*/
public void setBucketName(String bucketName) {
this.bucketName = bucketName;
}
/**
*
* The name of the S3 bucket.
*
*
* @return The name of the S3 bucket.
*/
public String getBucketName() {
return this.bucketName;
}
/**
*
* The name of the S3 bucket.
*
*
* @param bucketName
* The name of the S3 bucket.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public S3Settings withBucketName(String bucketName) {
setBucketName(bucketName);
return this;
}
/**
*
* An optional parameter to use GZIP to compress the target files. Set to GZIP to compress the target files. Either
* set this parameter to NONE (the default) or don't use it to leave the files uncompressed. This parameter applies
* to both .csv and .parquet file formats.
*
*
* @param compressionType
* An optional parameter to use GZIP to compress the target files. Set to GZIP to compress the target files.
* Either set this parameter to NONE (the default) or don't use it to leave the files uncompressed. This
* parameter applies to both .csv and .parquet file formats.
* @see CompressionTypeValue
*/
public void setCompressionType(String compressionType) {
this.compressionType = compressionType;
}
/**
*
* An optional parameter to use GZIP to compress the target files. Set to GZIP to compress the target files. Either
* set this parameter to NONE (the default) or don't use it to leave the files uncompressed. This parameter applies
* to both .csv and .parquet file formats.
*
*
* @return An optional parameter to use GZIP to compress the target files. Set to GZIP to compress the target files.
* Either set this parameter to NONE (the default) or don't use it to leave the files uncompressed. This
* parameter applies to both .csv and .parquet file formats.
* @see CompressionTypeValue
*/
public String getCompressionType() {
return this.compressionType;
}
/**
*
* An optional parameter to use GZIP to compress the target files. Set to GZIP to compress the target files. Either
* set this parameter to NONE (the default) or don't use it to leave the files uncompressed. This parameter applies
* to both .csv and .parquet file formats.
*
*
* @param compressionType
* An optional parameter to use GZIP to compress the target files. Set to GZIP to compress the target files.
* Either set this parameter to NONE (the default) or don't use it to leave the files uncompressed. This
* parameter applies to both .csv and .parquet file formats.
* @return Returns a reference to this object so that method calls can be chained together.
* @see CompressionTypeValue
*/
public S3Settings withCompressionType(String compressionType) {
setCompressionType(compressionType);
return this;
}
/**
*
* An optional parameter to use GZIP to compress the target files. Set to GZIP to compress the target files. Either
* set this parameter to NONE (the default) or don't use it to leave the files uncompressed. This parameter applies
* to both .csv and .parquet file formats.
*
*
* @param compressionType
* An optional parameter to use GZIP to compress the target files. Set to GZIP to compress the target files.
* Either set this parameter to NONE (the default) or don't use it to leave the files uncompressed. This
* parameter applies to both .csv and .parquet file formats.
* @see CompressionTypeValue
*/
public void setCompressionType(CompressionTypeValue compressionType) {
withCompressionType(compressionType);
}
/**
*
* An optional parameter to use GZIP to compress the target files. Set to GZIP to compress the target files. Either
* set this parameter to NONE (the default) or don't use it to leave the files uncompressed. This parameter applies
* to both .csv and .parquet file formats.
*
*
* @param compressionType
* An optional parameter to use GZIP to compress the target files. Set to GZIP to compress the target files.
* Either set this parameter to NONE (the default) or don't use it to leave the files uncompressed. This
* parameter applies to both .csv and .parquet file formats.
* @return Returns a reference to this object so that method calls can be chained together.
* @see CompressionTypeValue
*/
public S3Settings withCompressionType(CompressionTypeValue compressionType) {
this.compressionType = compressionType.toString();
return this;
}
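// Illustrative usage sketch (not part of the generated SDK source): compression can be set either from the
// CompressionTypeValue enum overload above or from its string overload; both store the same string value.
//
//   S3Settings gzipSettings = new S3Settings().withCompressionType(CompressionTypeValue.GZIP);
//   S3Settings sameSettings = new S3Settings().withCompressionType(CompressionTypeValue.GZIP.toString());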
/**
*
* The type of server-side encryption that you want to use for your data. This encryption type is part of the
* endpoint settings or the extra connections attributes for Amazon S3. You can choose either SSE_S3 (the default)
* or SSE_KMS. To use SSE_S3, you need an AWS Identity and Access Management (IAM) role with permission to allow
* "arn:aws:s3:::dms-*" to use the following actions:
*
* - s3:CreateBucket
* - s3:ListBucket
* - s3:DeleteBucket
* - s3:GetBucketLocation
* - s3:GetObject
* - s3:PutObject
* - s3:DeleteObject
* - s3:GetObjectVersion
* - s3:GetBucketPolicy
* - s3:PutBucketPolicy
* - s3:DeleteBucketPolicy
*
* @param encryptionMode
* The type of server-side encryption that you want to use for your data. This encryption type is part of the
* endpoint settings or the extra connections attributes for Amazon S3. You can choose either SSE_S3 (the default)
* or SSE_KMS. To use SSE_S3, you need an AWS Identity and Access Management (IAM) role with permission to allow
* "arn:aws:s3:::dms-*" to use the following actions:
*
* - s3:CreateBucket
* - s3:ListBucket
* - s3:DeleteBucket
* - s3:GetBucketLocation
* - s3:GetObject
* - s3:PutObject
* - s3:DeleteObject
* - s3:GetObjectVersion
* - s3:GetBucketPolicy
* - s3:PutBucketPolicy
* - s3:DeleteBucketPolicy
* @see EncryptionModeValue
*/
public void setEncryptionMode(String encryptionMode) {
this.encryptionMode = encryptionMode;
}
/**
*
* The type of server-side encryption that you want to use for your data. This encryption type is part of the
* endpoint settings or the extra connections attributes for Amazon S3. You can choose either SSE_S3 (the default)
* or SSE_KMS. To use SSE_S3, you need an AWS Identity and Access Management (IAM) role with permission to allow
* "arn:aws:s3:::dms-*" to use the following actions:
*
* - s3:CreateBucket
* - s3:ListBucket
* - s3:DeleteBucket
* - s3:GetBucketLocation
* - s3:GetObject
* - s3:PutObject
* - s3:DeleteObject
* - s3:GetObjectVersion
* - s3:GetBucketPolicy
* - s3:PutBucketPolicy
* - s3:DeleteBucketPolicy
*
* @return The type of server-side encryption that you want to use for your data. This encryption type is part of
* the endpoint settings or the extra connections attributes for Amazon S3. You can choose either SSE_S3 (the
* default) or SSE_KMS. To use SSE_S3, you need an AWS Identity and Access Management (IAM) role with permission
* to allow "arn:aws:s3:::dms-*" to use the following actions:
*
* - s3:CreateBucket
* - s3:ListBucket
* - s3:DeleteBucket
* - s3:GetBucketLocation
* - s3:GetObject
* - s3:PutObject
* - s3:DeleteObject
* - s3:GetObjectVersion
* - s3:GetBucketPolicy
* - s3:PutBucketPolicy
* - s3:DeleteBucketPolicy
* @see EncryptionModeValue
*/
public String getEncryptionMode() {
return this.encryptionMode;
}
/**
*
* The type of server-side encryption that you want to use for your data. This encryption type is part of the
* endpoint settings or the extra connections attributes for Amazon S3. You can choose either SSE_S3 (the default)
* or SSE_KMS. To use SSE_S3, you need an AWS Identity and Access Management (IAM) role with permission to allow
* "arn:aws:s3:::dms-*" to use the following actions:
*
* - s3:CreateBucket
* - s3:ListBucket
* - s3:DeleteBucket
* - s3:GetBucketLocation
* - s3:GetObject
* - s3:PutObject
* - s3:DeleteObject
* - s3:GetObjectVersion
* - s3:GetBucketPolicy
* - s3:PutBucketPolicy
* - s3:DeleteBucketPolicy
*
* @param encryptionMode
* The type of server-side encryption that you want to use for your data. This encryption type is part of the
* endpoint settings or the extra connections attributes for Amazon S3. You can choose either SSE_S3 (the default)
* or SSE_KMS. To use SSE_S3, you need an AWS Identity and Access Management (IAM) role with permission to allow
* "arn:aws:s3:::dms-*" to use the following actions:
*
* - s3:CreateBucket
* - s3:ListBucket
* - s3:DeleteBucket
* - s3:GetBucketLocation
* - s3:GetObject
* - s3:PutObject
* - s3:DeleteObject
* - s3:GetObjectVersion
* - s3:GetBucketPolicy
* - s3:PutBucketPolicy
* - s3:DeleteBucketPolicy
* @return Returns a reference to this object so that method calls can be chained together.
* @see EncryptionModeValue
*/
public S3Settings withEncryptionMode(String encryptionMode) {
setEncryptionMode(encryptionMode);
return this;
}
/**
*
* The type of server-side encryption that you want to use for your data. This encryption type is part of the
* endpoint settings or the extra connections attributes for Amazon S3. You can choose either SSE_S3 (the default)
* or SSE_KMS. To use SSE_S3, you need an AWS Identity and Access Management (IAM) role with permission to allow
* "arn:aws:s3:::dms-*" to use the following actions:
*
* - s3:CreateBucket
* - s3:ListBucket
* - s3:DeleteBucket
* - s3:GetBucketLocation
* - s3:GetObject
* - s3:PutObject
* - s3:DeleteObject
* - s3:GetObjectVersion
* - s3:GetBucketPolicy
* - s3:PutBucketPolicy
* - s3:DeleteBucketPolicy
*
* @param encryptionMode
* The type of server-side encryption that you want to use for your data. This encryption type is part of the
* endpoint settings or the extra connections attributes for Amazon S3. You can choose either SSE_S3 (the default)
* or SSE_KMS. To use SSE_S3, you need an AWS Identity and Access Management (IAM) role with permission to allow
* "arn:aws:s3:::dms-*" to use the following actions:
*
* - s3:CreateBucket
* - s3:ListBucket
* - s3:DeleteBucket
* - s3:GetBucketLocation
* - s3:GetObject
* - s3:PutObject
* - s3:DeleteObject
* - s3:GetObjectVersion
* - s3:GetBucketPolicy
* - s3:PutBucketPolicy
* - s3:DeleteBucketPolicy
* @see EncryptionModeValue
*/
public void setEncryptionMode(EncryptionModeValue encryptionMode) {
withEncryptionMode(encryptionMode);
}
/**
*
* The type of server-side encryption that you want to use for your data. This encryption type is part of the
* endpoint settings or the extra connections attributes for Amazon S3. You can choose either SSE_S3 (the default)
* or SSE_KMS. To use SSE_S3, you need an AWS Identity and Access Management (IAM) role with permission to allow
* "arn:aws:s3:::dms-*" to use the following actions:
*
* - s3:CreateBucket
* - s3:ListBucket
* - s3:DeleteBucket
* - s3:GetBucketLocation
* - s3:GetObject
* - s3:PutObject
* - s3:DeleteObject
* - s3:GetObjectVersion
* - s3:GetBucketPolicy
* - s3:PutBucketPolicy
* - s3:DeleteBucketPolicy
*
* @param encryptionMode
* The type of server-side encryption that you want to use for your data. This encryption type is part of the
* endpoint settings or the extra connections attributes for Amazon S3. You can choose either SSE_S3 (the default)
* or SSE_KMS. To use SSE_S3, you need an AWS Identity and Access Management (IAM) role with permission to allow
* "arn:aws:s3:::dms-*" to use the following actions:
*
* - s3:CreateBucket
* - s3:ListBucket
* - s3:DeleteBucket
* - s3:GetBucketLocation
* - s3:GetObject
* - s3:PutObject
* - s3:DeleteObject
* - s3:GetObjectVersion
* - s3:GetBucketPolicy
* - s3:PutBucketPolicy
* - s3:DeleteBucketPolicy
* @return Returns a reference to this object so that method calls can be chained together.
* @see EncryptionModeValue
*/
public S3Settings withEncryptionMode(EncryptionModeValue encryptionMode) {
this.encryptionMode = encryptionMode.toString();
return this;
}
/**
*
* If you are using SSE_KMS for the EncryptionMode, provide the AWS KMS key ID. The key that you use needs an
* attached policy that enables AWS Identity and Access Management (IAM) user permissions and allows use of the key.
*
* Here is a CLI example:
* aws dms create-endpoint --endpoint-identifier value --endpoint-type target --engine-name s3 --s3-settings ServiceAccessRoleArn=value,BucketFolder=value,BucketName=value,EncryptionMode=SSE_KMS,ServerSideEncryptionKmsKeyId=value
*
* @param serverSideEncryptionKmsKeyId
* If you are using SSE_KMS for the EncryptionMode, provide the AWS KMS key ID. The key that you use needs an
* attached policy that enables AWS Identity and Access Management (IAM) user permissions and allows use of the key.
*
* Here is a CLI example:
* aws dms create-endpoint --endpoint-identifier value --endpoint-type target --engine-name s3 --s3-settings ServiceAccessRoleArn=value,BucketFolder=value,BucketName=value,EncryptionMode=SSE_KMS,ServerSideEncryptionKmsKeyId=value
*/
public void setServerSideEncryptionKmsKeyId(String serverSideEncryptionKmsKeyId) {
this.serverSideEncryptionKmsKeyId = serverSideEncryptionKmsKeyId;
}
/**
*
* If you are using SSE_KMS for the EncryptionMode, provide the AWS KMS key ID. The key that you use needs an
* attached policy that enables AWS Identity and Access Management (IAM) user permissions and allows use of the key.
*
* Here is a CLI example:
* aws dms create-endpoint --endpoint-identifier value --endpoint-type target --engine-name s3 --s3-settings ServiceAccessRoleArn=value,BucketFolder=value,BucketName=value,EncryptionMode=SSE_KMS,ServerSideEncryptionKmsKeyId=value
*
* @return If you are using SSE_KMS for the EncryptionMode, provide the AWS KMS key ID. The key that you use needs
* an attached policy that enables AWS Identity and Access Management (IAM) user permissions and allows use of the
* key.
*
* Here is a CLI example:
* aws dms create-endpoint --endpoint-identifier value --endpoint-type target --engine-name s3 --s3-settings ServiceAccessRoleArn=value,BucketFolder=value,BucketName=value,EncryptionMode=SSE_KMS,ServerSideEncryptionKmsKeyId=value
*/
public String getServerSideEncryptionKmsKeyId() {
return this.serverSideEncryptionKmsKeyId;
}
/**
*
* If you are using SSE_KMS for the EncryptionMode, provide the AWS KMS key ID. The key that you use needs an
* attached policy that enables AWS Identity and Access Management (IAM) user permissions and allows use of the key.
*
* Here is a CLI example:
* aws dms create-endpoint --endpoint-identifier value --endpoint-type target --engine-name s3 --s3-settings ServiceAccessRoleArn=value,BucketFolder=value,BucketName=value,EncryptionMode=SSE_KMS,ServerSideEncryptionKmsKeyId=value
*
* @param serverSideEncryptionKmsKeyId
* If you are using SSE_KMS for the EncryptionMode, provide the AWS KMS key ID. The key that you use needs an
* attached policy that enables AWS Identity and Access Management (IAM) user permissions and allows use of the key.
*
* Here is a CLI example:
* aws dms create-endpoint --endpoint-identifier value --endpoint-type target --engine-name s3 --s3-settings ServiceAccessRoleArn=value,BucketFolder=value,BucketName=value,EncryptionMode=SSE_KMS,ServerSideEncryptionKmsKeyId=value
* @return Returns a reference to this object so that method calls can be chained together.
*/
public S3Settings withServerSideEncryptionKmsKeyId(String serverSideEncryptionKmsKeyId) {
setServerSideEncryptionKmsKeyId(serverSideEncryptionKmsKeyId);
return this;
}
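// Illustrative usage sketch (not part of the generated SDK source): a rough Java analogue of the CLI example in
// the Javadoc above, assuming the companion CreateEndpointRequest model in this package. All values are
// placeholders.
//
//   S3Settings s3Settings = new S3Settings()
//           .withServiceAccessRoleArn("arn:aws:iam::123456789012:role/dms-s3-access") // placeholder
//           .withBucketFolder("dms-output")                                           // placeholder
//           .withBucketName("my-dms-target-bucket")                                   // placeholder
//           .withEncryptionMode(EncryptionModeValue.SSE_KMS)
//           .withServerSideEncryptionKmsKeyId("placeholder-kms-key-id");              // placeholder
//
//   CreateEndpointRequest request = new CreateEndpointRequest()
//           .withEndpointIdentifier("s3-target-endpoint")                             // placeholder
//           .withEndpointType("target")
//           .withEngineName("s3")
//           .withS3Settings(s3Settings);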
/**
*
* The format of the data that you want to use for output. You can choose one of the following:
*
* - csv: This is a row-based file format with comma-separated values (.csv).
* - parquet: Apache Parquet (.parquet) is a columnar storage file format that features efficient compression and
* provides faster query response.
*
* @param dataFormat
* The format of the data that you want to use for output. You can choose one of the following:
*
* - csv: This is a row-based file format with comma-separated values (.csv).
* - parquet: Apache Parquet (.parquet) is a columnar storage file format that features efficient compression and
* provides faster query response.
* @see DataFormatValue
*/
public void setDataFormat(String dataFormat) {
this.dataFormat = dataFormat;
}
/**
*
* The format of the data that you want to use for output. You can choose one of the following:
*
* - csv: This is a row-based file format with comma-separated values (.csv).
* - parquet: Apache Parquet (.parquet) is a columnar storage file format that features efficient compression and
* provides faster query response.
*
* @return The format of the data that you want to use for output. You can choose one of the following:
*
* - csv: This is a row-based file format with comma-separated values (.csv).
* - parquet: Apache Parquet (.parquet) is a columnar storage file format that features efficient compression and
* provides faster query response.
* @see DataFormatValue
*/
public String getDataFormat() {
return this.dataFormat;
}
/**
*
* The format of the data that you want to use for output. You can choose one of the following:
*
* - csv: This is a row-based file format with comma-separated values (.csv).
* - parquet: Apache Parquet (.parquet) is a columnar storage file format that features efficient compression and
* provides faster query response.
*
* @param dataFormat
* The format of the data that you want to use for output. You can choose one of the following:
*
* - csv: This is a row-based file format with comma-separated values (.csv).
* - parquet: Apache Parquet (.parquet) is a columnar storage file format that features efficient compression and
* provides faster query response.
* @return Returns a reference to this object so that method calls can be chained together.
* @see DataFormatValue
*/
public S3Settings withDataFormat(String dataFormat) {
setDataFormat(dataFormat);
return this;
}
/**
*
* The format of the data that you want to use for output. You can choose one of the following:
*
* - csv: This is a row-based file format with comma-separated values (.csv).
* - parquet: Apache Parquet (.parquet) is a columnar storage file format that features efficient compression and
* provides faster query response.
*
* @param dataFormat
* The format of the data that you want to use for output. You can choose one of the following:
*
* - csv: This is a row-based file format with comma-separated values (.csv).
* - parquet: Apache Parquet (.parquet) is a columnar storage file format that features efficient compression and
* provides faster query response.
* @see DataFormatValue
*/
public void setDataFormat(DataFormatValue dataFormat) {
withDataFormat(dataFormat);
}
/**
*
* The format of the data that you want to use for output. You can choose one of the following:
*
* - csv: This is a row-based file format with comma-separated values (.csv).
* - parquet: Apache Parquet (.parquet) is a columnar storage file format that features efficient compression and
* provides faster query response.
*
* @param dataFormat
* The format of the data that you want to use for output. You can choose one of the following:
*
* - csv: This is a row-based file format with comma-separated values (.csv).
* - parquet: Apache Parquet (.parquet) is a columnar storage file format that features efficient compression and
* provides faster query response.
* @return Returns a reference to this object so that method calls can be chained together.
* @see DataFormatValue
*/
public S3Settings withDataFormat(DataFormatValue dataFormat) {
this.dataFormat = dataFormat.toString();
return this;
}
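// Illustrative usage sketch (not part of the generated SDK source): selecting Parquet output via the
// DataFormatValue enum; the .parquet-only settings elsewhere in this class (encoding type, page and row group
// sizes, statistics) only take effect with this format.
//
//   S3Settings parquetSettings = new S3Settings()
//           .withDataFormat(DataFormatValue.PARQUET)
//           .withCompressionType(CompressionTypeValue.GZIP);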
/**
*
* The type of encoding you are using:
*
* - RLE_DICTIONARY uses a combination of bit-packing and run-length encoding to store repeated values more
* efficiently. This is the default.
* - PLAIN doesn't use encoding at all. Values are stored as they are.
* - PLAIN_DICTIONARY builds a dictionary of the values encountered in a given column. The dictionary is stored in
* a dictionary page for each column chunk.
*
* @param encodingType
* The type of encoding you are using:
*
* - RLE_DICTIONARY uses a combination of bit-packing and run-length encoding to store repeated values more
* efficiently. This is the default.
* - PLAIN doesn't use encoding at all. Values are stored as they are.
* - PLAIN_DICTIONARY builds a dictionary of the values encountered in a given column. The dictionary is stored in
* a dictionary page for each column chunk.
* @see EncodingTypeValue
*/
public void setEncodingType(String encodingType) {
this.encodingType = encodingType;
}
/**
*
* The type of encoding you are using:
*
* - RLE_DICTIONARY uses a combination of bit-packing and run-length encoding to store repeated values more
* efficiently. This is the default.
* - PLAIN doesn't use encoding at all. Values are stored as they are.
* - PLAIN_DICTIONARY builds a dictionary of the values encountered in a given column. The dictionary is stored in
* a dictionary page for each column chunk.
*
* @return The type of encoding you are using:
*
* - RLE_DICTIONARY uses a combination of bit-packing and run-length encoding to store repeated values more
* efficiently. This is the default.
* - PLAIN doesn't use encoding at all. Values are stored as they are.
* - PLAIN_DICTIONARY builds a dictionary of the values encountered in a given column. The dictionary is stored in
* a dictionary page for each column chunk.
* @see EncodingTypeValue
*/
public String getEncodingType() {
return this.encodingType;
}
/**
*
* The type of encoding you are using:
*
* - RLE_DICTIONARY uses a combination of bit-packing and run-length encoding to store repeated values more
* efficiently. This is the default.
* - PLAIN doesn't use encoding at all. Values are stored as they are.
* - PLAIN_DICTIONARY builds a dictionary of the values encountered in a given column. The dictionary is stored in
* a dictionary page for each column chunk.
*
* @param encodingType
* The type of encoding you are using:
*
* - RLE_DICTIONARY uses a combination of bit-packing and run-length encoding to store repeated values more
* efficiently. This is the default.
* - PLAIN doesn't use encoding at all. Values are stored as they are.
* - PLAIN_DICTIONARY builds a dictionary of the values encountered in a given column. The dictionary is stored in
* a dictionary page for each column chunk.
* @return Returns a reference to this object so that method calls can be chained together.
* @see EncodingTypeValue
*/
public S3Settings withEncodingType(String encodingType) {
setEncodingType(encodingType);
return this;
}
/**
*
* The type of encoding you are using:
*
* - RLE_DICTIONARY uses a combination of bit-packing and run-length encoding to store repeated values more
* efficiently. This is the default.
* - PLAIN doesn't use encoding at all. Values are stored as they are.
* - PLAIN_DICTIONARY builds a dictionary of the values encountered in a given column. The dictionary is stored in
* a dictionary page for each column chunk.
*
* @param encodingType
* The type of encoding you are using:
*
* - RLE_DICTIONARY uses a combination of bit-packing and run-length encoding to store repeated values more
* efficiently. This is the default.
* - PLAIN doesn't use encoding at all. Values are stored as they are.
* - PLAIN_DICTIONARY builds a dictionary of the values encountered in a given column. The dictionary is stored in
* a dictionary page for each column chunk.
* @see EncodingTypeValue
*/
public void setEncodingType(EncodingTypeValue encodingType) {
withEncodingType(encodingType);
}
/**
*
* The type of encoding you are using:
*
* - RLE_DICTIONARY uses a combination of bit-packing and run-length encoding to store repeated values more
* efficiently. This is the default.
* - PLAIN doesn't use encoding at all. Values are stored as they are.
* - PLAIN_DICTIONARY builds a dictionary of the values encountered in a given column. The dictionary is stored in
* a dictionary page for each column chunk.
*
* @param encodingType
* The type of encoding you are using:
*
* - RLE_DICTIONARY uses a combination of bit-packing and run-length encoding to store repeated values more
* efficiently. This is the default.
* - PLAIN doesn't use encoding at all. Values are stored as they are.
* - PLAIN_DICTIONARY builds a dictionary of the values encountered in a given column. The dictionary is stored in
* a dictionary page for each column chunk.
* @return Returns a reference to this object so that method calls can be chained together.
* @see EncodingTypeValue
*/
public S3Settings withEncodingType(EncodingTypeValue encodingType) {
this.encodingType = encodingType.toString();
return this;
}
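// Illustrative usage sketch (not part of the generated SDK source): overriding the default RLE_DICTIONARY
// column encoding for Parquet output with plain encoding.
//
//   S3Settings plainEncoded = new S3Settings()
//           .withDataFormat(DataFormatValue.PARQUET)
//           .withEncodingType(EncodingTypeValue.PLAIN);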
/**
*
* The maximum size of an encoded dictionary page of a column. If the dictionary page exceeds this, this column is
* stored using an encoding type of PLAIN. This parameter defaults to 1024 * 1024 bytes (1 MiB), the maximum size
* of a dictionary page before it reverts to PLAIN encoding. This size is used for .parquet file format only.
*
* @param dictPageSizeLimit
* The maximum size of an encoded dictionary page of a column. If the dictionary page exceeds this, this column is
* stored using an encoding type of PLAIN. This parameter defaults to 1024 * 1024 bytes (1 MiB), the maximum size
* of a dictionary page before it reverts to PLAIN encoding. This size is used for .parquet file format only.
*/
public void setDictPageSizeLimit(Integer dictPageSizeLimit) {
this.dictPageSizeLimit = dictPageSizeLimit;
}
/**
*
* The maximum size of an encoded dictionary page of a column. If the dictionary page exceeds this, this column is
* stored using an encoding type of PLAIN. This parameter defaults to 1024 * 1024 bytes (1 MiB), the maximum size
* of a dictionary page before it reverts to PLAIN encoding. This size is used for .parquet file format only.
*
* @return The maximum size of an encoded dictionary page of a column. If the dictionary page exceeds this, this
* column is stored using an encoding type of PLAIN. This parameter defaults to 1024 * 1024 bytes (1 MiB), the
* maximum size of a dictionary page before it reverts to PLAIN encoding. This size is used for .parquet file
* format only.
*/
public Integer getDictPageSizeLimit() {
return this.dictPageSizeLimit;
}
/**
*
* The maximum size of an encoded dictionary page of a column. If the dictionary page exceeds this, this column is
* stored using an encoding type of PLAIN. This parameter defaults to 1024 * 1024 bytes (1 MiB), the maximum size
* of a dictionary page before it reverts to PLAIN encoding. This size is used for .parquet file format only.
*
* @param dictPageSizeLimit
* The maximum size of an encoded dictionary page of a column. If the dictionary page exceeds this, this column is
* stored using an encoding type of PLAIN. This parameter defaults to 1024 * 1024 bytes (1 MiB), the maximum size
* of a dictionary page before it reverts to PLAIN encoding. This size is used for .parquet file format only.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public S3Settings withDictPageSizeLimit(Integer dictPageSizeLimit) {
setDictPageSizeLimit(dictPageSizeLimit);
return this;
}
/**
*
* The number of rows in a row group. A smaller row group size provides faster reads. But as the number of row
* groups grows, the slower writes become. This parameter defaults to 10,000 rows. This number is used for .parquet
* file format only.
*
* If you choose a value larger than the maximum, RowGroupLength is set to the max row group length in bytes
* (64 * 1024 * 1024).
*
* @param rowGroupLength
* The number of rows in a row group. A smaller row group size provides faster reads. But as the number of row
* groups grows, the slower writes become. This parameter defaults to 10,000 rows. This number is used for .parquet
* file format only.
*
* If you choose a value larger than the maximum, RowGroupLength is set to the max row group length in bytes
* (64 * 1024 * 1024).
*/
public void setRowGroupLength(Integer rowGroupLength) {
this.rowGroupLength = rowGroupLength;
}
/**
*
* The number of rows in a row group. A smaller row group size provides faster reads. But as the number of row
* groups grows, the slower writes become. This parameter defaults to 10,000 rows. This number is used for .parquet
* file format only.
*
* If you choose a value larger than the maximum, RowGroupLength is set to the max row group length in bytes
* (64 * 1024 * 1024).
*
* @return The number of rows in a row group. A smaller row group size provides faster reads. But as the number of
* row groups grows, the slower writes become. This parameter defaults to 10,000 rows. This number is used for
* .parquet file format only.
*
* If you choose a value larger than the maximum, RowGroupLength is set to the max row group length in bytes
* (64 * 1024 * 1024).
*/
public Integer getRowGroupLength() {
return this.rowGroupLength;
}
/**
*
* The number of rows in a row group. A smaller row group size provides faster reads. But as the number of row
* groups grows, the slower writes become. This parameter defaults to 10,000 rows. This number is used for .parquet
* file format only.
*
* If you choose a value larger than the maximum, RowGroupLength is set to the max row group length in bytes
* (64 * 1024 * 1024).
*
* @param rowGroupLength
* The number of rows in a row group. A smaller row group size provides faster reads. But as the number of row
* groups grows, the slower writes become. This parameter defaults to 10,000 rows. This number is used for .parquet
* file format only.
*
* If you choose a value larger than the maximum, RowGroupLength is set to the max row group length in bytes
* (64 * 1024 * 1024).
* @return Returns a reference to this object so that method calls can be chained together.
*/
public S3Settings withRowGroupLength(Integer rowGroupLength) {
setRowGroupLength(rowGroupLength);
return this;
}
/**
*
* The size of one data page in bytes. This parameter defaults to 1024 * 1024 bytes (1 MiB). This number is used for
* .parquet file format only.
*
*
* @param dataPageSize
* The size of one data page in bytes. This parameter defaults to 1024 * 1024 bytes (1 MiB). This number is
* used for .parquet file format only.
*/
public void setDataPageSize(Integer dataPageSize) {
this.dataPageSize = dataPageSize;
}
/**
*
* The size of one data page in bytes. This parameter defaults to 1024 * 1024 bytes (1 MiB). This number is used for
* .parquet file format only.
*
*
* @return The size of one data page in bytes. This parameter defaults to 1024 * 1024 bytes (1 MiB). This number is
* used for .parquet file format only.
*/
public Integer getDataPageSize() {
return this.dataPageSize;
}
/**
*
* The size of one data page in bytes. This parameter defaults to 1024 * 1024 bytes (1 MiB). This number is used for
* .parquet file format only.
*
*
* @param dataPageSize
* The size of one data page in bytes. This parameter defaults to 1024 * 1024 bytes (1 MiB). This number is
* used for .parquet file format only.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public S3Settings withDataPageSize(Integer dataPageSize) {
setDataPageSize(dataPageSize);
return this;
}
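// Illustrative usage sketch (not part of the generated SDK source): the .parquet sizing knobs, shown here set
// explicitly to their documented defaults (1 MiB dictionary and data pages, 10,000-row row groups).
//
//   S3Settings parquetSizing = new S3Settings()
//           .withDataFormat(DataFormatValue.PARQUET)
//           .withDictPageSizeLimit(1024 * 1024)  // 1 MiB, the documented default
//           .withDataPageSize(1024 * 1024)       // 1 MiB, the documented default
//           .withRowGroupLength(10000);          // 10,000 rows, the documented default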
/**
*
* The version of the Apache Parquet format that you want to use: parquet_1_0 (the default) or parquet_2_0.
*
* @param parquetVersion
* The version of the Apache Parquet format that you want to use: parquet_1_0 (the default) or parquet_2_0.
* @see ParquetVersionValue
*/
public void setParquetVersion(String parquetVersion) {
this.parquetVersion = parquetVersion;
}
/**
*
* The version of the Apache Parquet format that you want to use: parquet_1_0 (the default) or parquet_2_0.
*
* @return The version of the Apache Parquet format that you want to use: parquet_1_0 (the default) or
* parquet_2_0.
* @see ParquetVersionValue
*/
public String getParquetVersion() {
return this.parquetVersion;
}
/**
*
* The version of the Apache Parquet format that you want to use: parquet_1_0 (the default) or parquet_2_0.
*
* @param parquetVersion
* The version of the Apache Parquet format that you want to use: parquet_1_0 (the default) or parquet_2_0.
* @return Returns a reference to this object so that method calls can be chained together.
* @see ParquetVersionValue
*/
public S3Settings withParquetVersion(String parquetVersion) {
setParquetVersion(parquetVersion);
return this;
}
/**
*
* The version of the Apache Parquet format that you want to use: parquet_1_0 (the default) or parquet_2_0.
*
* @param parquetVersion
* The version of the Apache Parquet format that you want to use: parquet_1_0 (the default) or parquet_2_0.
* @see ParquetVersionValue
*/
public void setParquetVersion(ParquetVersionValue parquetVersion) {
withParquetVersion(parquetVersion);
}
/**
*
* The version of the Apache Parquet format that you want to use: parquet_1_0 (the default) or parquet_2_0.
*
* @param parquetVersion
* The version of the Apache Parquet format that you want to use: parquet_1_0 (the default) or parquet_2_0.
* @return Returns a reference to this object so that method calls can be chained together.
* @see ParquetVersionValue
*/
public S3Settings withParquetVersion(ParquetVersionValue parquetVersion) {
this.parquetVersion = parquetVersion.toString();
return this;
}
/**
*
* A value that enables statistics for Parquet pages and row groups. Choose true to enable statistics, false to
* disable. Statistics include NULL, DISTINCT, MAX, and MIN values. This parameter defaults to true. This value is
* used for .parquet file format only.
*
* @param enableStatistics
* A value that enables statistics for Parquet pages and row groups. Choose true to enable statistics, false to
* disable. Statistics include NULL, DISTINCT, MAX, and MIN values. This parameter defaults to true. This value is
* used for .parquet file format only.
*/
public void setEnableStatistics(Boolean enableStatistics) {
this.enableStatistics = enableStatistics;
}
/**
*
* A value that enables statistics for Parquet pages and row groups. Choose true to enable statistics, false to
* disable. Statistics include NULL, DISTINCT, MAX, and MIN values. This parameter defaults to true. This value is
* used for .parquet file format only.
*
* @return A value that enables statistics for Parquet pages and row groups. Choose true to enable statistics,
* false to disable. Statistics include NULL, DISTINCT, MAX, and MIN values. This parameter defaults to true. This
* value is used for .parquet file format only.
*/
public Boolean getEnableStatistics() {
return this.enableStatistics;
}
/**
*
* A value that enables statistics for Parquet pages and row groups. Choose true to enable statistics, false to
* disable. Statistics include NULL, DISTINCT, MAX, and MIN values. This parameter defaults to true. This value is
* used for .parquet file format only.
*
* @param enableStatistics
* A value that enables statistics for Parquet pages and row groups. Choose true to enable statistics, false to
* disable. Statistics include NULL, DISTINCT, MAX, and MIN values. This parameter defaults to true. This value is
* used for .parquet file format only.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public S3Settings withEnableStatistics(Boolean enableStatistics) {
setEnableStatistics(enableStatistics);
return this;
}
/**
*
* A value that enables statistics for Parquet pages and row groups. Choose true to enable statistics, false to
* disable. Statistics include NULL, DISTINCT, MAX, and MIN values. This parameter defaults to true. This value is
* used for .parquet file format only.
*
* @return A value that enables statistics for Parquet pages and row groups. Choose true to enable statistics,
* false to disable. Statistics include NULL, DISTINCT, MAX, and MIN values. This parameter defaults to true. This
* value is used for .parquet file format only.
*/
public Boolean isEnableStatistics() {
return this.enableStatistics;
}
/**
*
* A value that enables a full load to write INSERT operations to the comma-separated value (.csv) output files
* only to indicate how the rows were added to the source database.
*
* AWS DMS supports the IncludeOpForFullLoad parameter in versions 3.1.4 and later.
*
* For full load, records can only be inserted. By default (the false setting), no information is recorded in
* these output files for a full load to indicate that the rows were inserted at the source database. If
* IncludeOpForFullLoad is set to true or y, the INSERT is recorded as an I annotation in the first field of the
* .csv file. This allows the format of your target records from a full load to be consistent with the target
* records from a CDC load.
*
* This setting works together with the CdcInsertsOnly and the CdcInsertsAndUpdates parameters for output to .csv
* files only. For more information about how these settings work together, see Indicating Source DB Operations in
* Migrated S3 Data in the AWS Database Migration Service User Guide.
*
* @param includeOpForFullLoad
* A value that enables a full load to write INSERT operations to the comma-separated value (.csv) output files
* only to indicate how the rows were added to the source database.
*
* AWS DMS supports the IncludeOpForFullLoad parameter in versions 3.1.4 and later.
*
* For full load, records can only be inserted. By default (the false setting), no information is recorded in
* these output files for a full load to indicate that the rows were inserted at the source database. If
* IncludeOpForFullLoad is set to true or y, the INSERT is recorded as an I annotation in the first field of the
* .csv file. This allows the format of your target records from a full load to be consistent with the target
* records from a CDC load.
*
* This setting works together with the CdcInsertsOnly and the CdcInsertsAndUpdates parameters for output to .csv
* files only. For more information about how these settings work together, see Indicating Source DB Operations in
* Migrated S3 Data in the AWS Database Migration Service User Guide.
*
*/
public void setIncludeOpForFullLoad(Boolean includeOpForFullLoad) {
this.includeOpForFullLoad = includeOpForFullLoad;
}
/**
*
* A value that enables a full load to write INSERT operations to the comma-separated value (.csv) output files
* only to indicate how the rows were added to the source database.
*
* AWS DMS supports the IncludeOpForFullLoad parameter in versions 3.1.4 and later.
*
* For full load, records can only be inserted. By default (the false setting), no information is recorded in
* these output files for a full load to indicate that the rows were inserted at the source database. If
* IncludeOpForFullLoad is set to true or y, the INSERT is recorded as an I annotation in the first field of the
* .csv file. This allows the format of your target records from a full load to be consistent with the target
* records from a CDC load.
*
* This setting works together with the CdcInsertsOnly and the CdcInsertsAndUpdates parameters for output to .csv
* files only. For more information about how these settings work together, see Indicating Source DB Operations in
* Migrated S3 Data in the AWS Database Migration Service User Guide.
*
* @return A value that enables a full load to write INSERT operations to the comma-separated value (.csv) output
* files only to indicate how the rows were added to the source database.
*
* AWS DMS supports the IncludeOpForFullLoad parameter in versions 3.1.4 and later.
*
* For full load, records can only be inserted. By default (the false setting), no information is recorded in
* these output files for a full load to indicate that the rows were inserted at the source database. If
* IncludeOpForFullLoad is set to true or y, the INSERT is recorded as an I annotation in the first field of the
* .csv file. This allows the format of your target records from a full load to be consistent with the target
* records from a CDC load.
*
* This setting works together with the CdcInsertsOnly and the CdcInsertsAndUpdates parameters for output to .csv
* files only. For more information about how these settings work together, see Indicating Source DB Operations in
* Migrated S3 Data in the AWS Database Migration Service User Guide.
*
*/
public Boolean getIncludeOpForFullLoad() {
return this.includeOpForFullLoad;
}
/**
*
* A value that enables a full load to write INSERT operations to the comma-separated value (.csv) output files only
* to indicate how the rows were added to the source database.
*
* AWS DMS supports the IncludeOpForFullLoad parameter in versions 3.1.4 and later.
*
* For full load, records can only be inserted. By default (the false setting), no information is recorded in these
* output files for a full load to indicate that the rows were inserted at the source database. If
* IncludeOpForFullLoad is set to true or y, the INSERT is recorded as an I annotation in the first field of the
* .csv file. This allows the format of your target records from a full load to be consistent with the target
* records from a CDC load.
*
* This setting works together with the CdcInsertsOnly and the CdcInsertsAndUpdates parameters for output to .csv
* files only. For more information about how these settings work together, see Indicating Source DB Operations in
* Migrated S3 Data in the AWS Database Migration Service User Guide.
*
* @param includeOpForFullLoad
*        A value that enables a full load to write INSERT operations to the comma-separated value (.csv) output
*        files only to indicate how the rows were added to the source database.
*
*        AWS DMS supports the IncludeOpForFullLoad parameter in versions 3.1.4 and later.
*
*        For full load, records can only be inserted. By default (the false setting), no information is recorded
*        in these output files for a full load to indicate that the rows were inserted at the source database. If
*        IncludeOpForFullLoad is set to true or y, the INSERT is recorded as an I annotation in the first field of
*        the .csv file. This allows the format of your target records from a full load to be consistent with the
*        target records from a CDC load.
*
*        This setting works together with the CdcInsertsOnly and the CdcInsertsAndUpdates parameters for output to
*        .csv files only. For more information about how these settings work together, see Indicating Source DB
*        Operations in Migrated S3 Data in the AWS Database Migration Service User Guide.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public S3Settings withIncludeOpForFullLoad(Boolean includeOpForFullLoad) {
setIncludeOpForFullLoad(includeOpForFullLoad);
return this;
}
/**
*
* A value that enables a full load to write INSERT operations to the comma-separated value (.csv) output files only
* to indicate how the rows were added to the source database.
*
* AWS DMS supports the IncludeOpForFullLoad parameter in versions 3.1.4 and later.
*
* For full load, records can only be inserted. By default (the false setting), no information is recorded in these
* output files for a full load to indicate that the rows were inserted at the source database. If
* IncludeOpForFullLoad is set to true or y, the INSERT is recorded as an I annotation in the first field of the
* .csv file. This allows the format of your target records from a full load to be consistent with the target
* records from a CDC load.
*
* This setting works together with the CdcInsertsOnly and the CdcInsertsAndUpdates parameters for output to .csv
* files only. For more information about how these settings work together, see Indicating Source DB Operations in
* Migrated S3 Data in the AWS Database Migration Service User Guide.
*
* @return A value that enables a full load to write INSERT operations to the comma-separated value (.csv) output
*         files only to indicate how the rows were added to the source database.
*
*         AWS DMS supports the IncludeOpForFullLoad parameter in versions 3.1.4 and later.
*
*         For full load, records can only be inserted. By default (the false setting), no information is recorded
*         in these output files for a full load to indicate that the rows were inserted at the source database. If
*         IncludeOpForFullLoad is set to true or y, the INSERT is recorded as an I annotation in the first field of
*         the .csv file. This allows the format of your target records from a full load to be consistent with the
*         target records from a CDC load.
*
*         This setting works together with the CdcInsertsOnly and the CdcInsertsAndUpdates parameters for output to
*         .csv files only. For more information about how these settings work together, see Indicating Source DB
*         Operations in Migrated S3 Data in the AWS Database Migration Service User Guide.
*/
public Boolean isIncludeOpForFullLoad() {
return this.includeOpForFullLoad;
}
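/*
 * Illustrative usage sketch (assumption, not generated code): annotating full-load .csv rows with an "I"
 * operation marker so they match the layout of CDC output. The bucket name is a placeholder.
 *
 *   S3Settings csvSettings = new S3Settings()
 *           .withBucketName("my-dms-target-bucket")
 *           .withIncludeOpForFullLoad(true);
 */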
/**
*
* A value that enables a change data capture (CDC) load to write only INSERT operations to .csv or columnar storage
* (.parquet) output files. By default (the false setting), the first field in a .csv or .parquet record contains
* the letter I (INSERT), U (UPDATE), or D (DELETE). These values indicate whether the row was inserted, updated, or
* deleted at the source database for a CDC load to the target.
*
* If CdcInsertsOnly is set to true or y, only INSERTs from the source database are migrated to the .csv or .parquet
* file. For .csv format only, how these INSERTs are recorded depends on the value of IncludeOpForFullLoad. If
* IncludeOpForFullLoad is set to true, the first field of every CDC record is set to I to indicate the INSERT
* operation at the source. If IncludeOpForFullLoad is set to false, every CDC record is written without a first
* field to indicate the INSERT operation at the source. For more information about how these settings work
* together, see Indicating Source DB Operations in Migrated S3 Data in the AWS Database Migration Service User
* Guide.
*
* AWS DMS supports this interaction between the CdcInsertsOnly and IncludeOpForFullLoad parameters in versions
* 3.1.4 and later.
*
* CdcInsertsOnly and CdcInsertsAndUpdates can't both be set to true for the same endpoint. Set either
* CdcInsertsOnly or CdcInsertsAndUpdates to true for the same endpoint, but not both.
*
* @param cdcInsertsOnly
*        A value that enables a change data capture (CDC) load to write only INSERT operations to .csv or columnar
*        storage (.parquet) output files. By default (the false setting), the first field in a .csv or .parquet
*        record contains the letter I (INSERT), U (UPDATE), or D (DELETE). These values indicate whether the row
*        was inserted, updated, or deleted at the source database for a CDC load to the target.
*
*        If CdcInsertsOnly is set to true or y, only INSERTs from the source database are migrated to the .csv or
*        .parquet file. For .csv format only, how these INSERTs are recorded depends on the value of
*        IncludeOpForFullLoad. If IncludeOpForFullLoad is set to true, the first field of every CDC record is set
*        to I to indicate the INSERT operation at the source. If IncludeOpForFullLoad is set to false, every CDC
*        record is written without a first field to indicate the INSERT operation at the source. For more
*        information about how these settings work together, see Indicating Source DB Operations in Migrated S3
*        Data in the AWS Database Migration Service User Guide.
*
*        AWS DMS supports this interaction between the CdcInsertsOnly and IncludeOpForFullLoad parameters in
*        versions 3.1.4 and later.
*
*        CdcInsertsOnly and CdcInsertsAndUpdates can't both be set to true for the same endpoint. Set either
*        CdcInsertsOnly or CdcInsertsAndUpdates to true for the same endpoint, but not both.
*/
public void setCdcInsertsOnly(Boolean cdcInsertsOnly) {
this.cdcInsertsOnly = cdcInsertsOnly;
}
/**
*
* A value that enables a change data capture (CDC) load to write only INSERT operations to .csv or columnar storage
* (.parquet) output files. By default (the false setting), the first field in a .csv or .parquet record contains
* the letter I (INSERT), U (UPDATE), or D (DELETE). These values indicate whether the row was inserted, updated, or
* deleted at the source database for a CDC load to the target.
*
* If CdcInsertsOnly is set to true or y, only INSERTs from the source database are migrated to the .csv or .parquet
* file. For .csv format only, how these INSERTs are recorded depends on the value of IncludeOpForFullLoad. If
* IncludeOpForFullLoad is set to true, the first field of every CDC record is set to I to indicate the INSERT
* operation at the source. If IncludeOpForFullLoad is set to false, every CDC record is written without a first
* field to indicate the INSERT operation at the source. For more information about how these settings work
* together, see Indicating Source DB Operations in Migrated S3 Data in the AWS Database Migration Service User
* Guide.
*
* AWS DMS supports this interaction between the CdcInsertsOnly and IncludeOpForFullLoad parameters in versions
* 3.1.4 and later.
*
* CdcInsertsOnly and CdcInsertsAndUpdates can't both be set to true for the same endpoint. Set either
* CdcInsertsOnly or CdcInsertsAndUpdates to true for the same endpoint, but not both.
*
* @return A value that enables a change data capture (CDC) load to write only INSERT operations to .csv or columnar
*         storage (.parquet) output files. By default (the false setting), the first field in a .csv or .parquet
*         record contains the letter I (INSERT), U (UPDATE), or D (DELETE). These values indicate whether the row
*         was inserted, updated, or deleted at the source database for a CDC load to the target.
*
*         If CdcInsertsOnly is set to true or y, only INSERTs from the source database are migrated to the .csv or
*         .parquet file. For .csv format only, how these INSERTs are recorded depends on the value of
*         IncludeOpForFullLoad. If IncludeOpForFullLoad is set to true, the first field of every CDC record is set
*         to I to indicate the INSERT operation at the source. If IncludeOpForFullLoad is set to false, every CDC
*         record is written without a first field to indicate the INSERT operation at the source. For more
*         information about how these settings work together, see Indicating Source DB Operations in Migrated S3
*         Data in the AWS Database Migration Service User Guide.
*
*         AWS DMS supports this interaction between the CdcInsertsOnly and IncludeOpForFullLoad parameters in
*         versions 3.1.4 and later.
*
*         CdcInsertsOnly and CdcInsertsAndUpdates can't both be set to true for the same endpoint. Set either
*         CdcInsertsOnly or CdcInsertsAndUpdates to true for the same endpoint, but not both.
*/
public Boolean getCdcInsertsOnly() {
return this.cdcInsertsOnly;
}
/**
*
* A value that enables a change data capture (CDC) load to write only INSERT operations to .csv or columnar storage
* (.parquet) output files. By default (the false setting), the first field in a .csv or .parquet record contains
* the letter I (INSERT), U (UPDATE), or D (DELETE). These values indicate whether the row was inserted, updated, or
* deleted at the source database for a CDC load to the target.
*
* If CdcInsertsOnly is set to true or y, only INSERTs from the source database are migrated to the .csv or .parquet
* file. For .csv format only, how these INSERTs are recorded depends on the value of IncludeOpForFullLoad. If
* IncludeOpForFullLoad is set to true, the first field of every CDC record is set to I to indicate the INSERT
* operation at the source. If IncludeOpForFullLoad is set to false, every CDC record is written without a first
* field to indicate the INSERT operation at the source. For more information about how these settings work
* together, see Indicating Source DB Operations in Migrated S3 Data in the AWS Database Migration Service User
* Guide.
*
* AWS DMS supports this interaction between the CdcInsertsOnly and IncludeOpForFullLoad parameters in versions
* 3.1.4 and later.
*
* CdcInsertsOnly and CdcInsertsAndUpdates can't both be set to true for the same endpoint. Set either
* CdcInsertsOnly or CdcInsertsAndUpdates to true for the same endpoint, but not both.
*
* @param cdcInsertsOnly
*        A value that enables a change data capture (CDC) load to write only INSERT operations to .csv or columnar
*        storage (.parquet) output files. By default (the false setting), the first field in a .csv or .parquet
*        record contains the letter I (INSERT), U (UPDATE), or D (DELETE). These values indicate whether the row
*        was inserted, updated, or deleted at the source database for a CDC load to the target.
*
*        If CdcInsertsOnly is set to true or y, only INSERTs from the source database are migrated to the .csv or
*        .parquet file. For .csv format only, how these INSERTs are recorded depends on the value of
*        IncludeOpForFullLoad. If IncludeOpForFullLoad is set to true, the first field of every CDC record is set
*        to I to indicate the INSERT operation at the source. If IncludeOpForFullLoad is set to false, every CDC
*        record is written without a first field to indicate the INSERT operation at the source. For more
*        information about how these settings work together, see Indicating Source DB Operations in Migrated S3
*        Data in the AWS Database Migration Service User Guide.
*
*        AWS DMS supports this interaction between the CdcInsertsOnly and IncludeOpForFullLoad parameters in
*        versions 3.1.4 and later.
*
*        CdcInsertsOnly and CdcInsertsAndUpdates can't both be set to true for the same endpoint. Set either
*        CdcInsertsOnly or CdcInsertsAndUpdates to true for the same endpoint, but not both.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public S3Settings withCdcInsertsOnly(Boolean cdcInsertsOnly) {
setCdcInsertsOnly(cdcInsertsOnly);
return this;
}
/**
*
* A value that enables a change data capture (CDC) load to write only INSERT operations to .csv or columnar storage
* (.parquet) output files. By default (the false setting), the first field in a .csv or .parquet record contains
* the letter I (INSERT), U (UPDATE), or D (DELETE). These values indicate whether the row was inserted, updated, or
* deleted at the source database for a CDC load to the target.
*
* If CdcInsertsOnly is set to true or y, only INSERTs from the source database are migrated to the .csv or .parquet
* file. For .csv format only, how these INSERTs are recorded depends on the value of IncludeOpForFullLoad. If
* IncludeOpForFullLoad is set to true, the first field of every CDC record is set to I to indicate the INSERT
* operation at the source. If IncludeOpForFullLoad is set to false, every CDC record is written without a first
* field to indicate the INSERT operation at the source. For more information about how these settings work
* together, see Indicating Source DB Operations in Migrated S3 Data in the AWS Database Migration Service User
* Guide.
*
* AWS DMS supports this interaction between the CdcInsertsOnly and IncludeOpForFullLoad parameters in versions
* 3.1.4 and later.
*
* CdcInsertsOnly and CdcInsertsAndUpdates can't both be set to true for the same endpoint. Set either
* CdcInsertsOnly or CdcInsertsAndUpdates to true for the same endpoint, but not both.
*
* @return A value that enables a change data capture (CDC) load to write only INSERT operations to .csv or columnar
*         storage (.parquet) output files. By default (the false setting), the first field in a .csv or .parquet
*         record contains the letter I (INSERT), U (UPDATE), or D (DELETE). These values indicate whether the row
*         was inserted, updated, or deleted at the source database for a CDC load to the target.
*
*         If CdcInsertsOnly is set to true or y, only INSERTs from the source database are migrated to the .csv or
*         .parquet file. For .csv format only, how these INSERTs are recorded depends on the value of
*         IncludeOpForFullLoad. If IncludeOpForFullLoad is set to true, the first field of every CDC record is set
*         to I to indicate the INSERT operation at the source. If IncludeOpForFullLoad is set to false, every CDC
*         record is written without a first field to indicate the INSERT operation at the source. For more
*         information about how these settings work together, see Indicating Source DB Operations in Migrated S3
*         Data in the AWS Database Migration Service User Guide.
*
*         AWS DMS supports this interaction between the CdcInsertsOnly and IncludeOpForFullLoad parameters in
*         versions 3.1.4 and later.
*
*         CdcInsertsOnly and CdcInsertsAndUpdates can't both be set to true for the same endpoint. Set either
*         CdcInsertsOnly or CdcInsertsAndUpdates to true for the same endpoint, but not both.
*/
public Boolean isCdcInsertsOnly() {
return this.cdcInsertsOnly;
}
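/*
 * Illustrative usage sketch (assumption): migrating only INSERT operations during CDC. CdcInsertsOnly and
 * CdcInsertsAndUpdates must not both be true on the same endpoint, so only one of them is set here.
 *
 *   S3Settings insertsOnly = new S3Settings()
 *           .withIncludeOpForFullLoad(true)
 *           .withCdcInsertsOnly(true);
 */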
/**
*
* A value that, when nonblank, causes AWS DMS to add a column with timestamp information to the endpoint data for
* an Amazon S3 target.
*
* AWS DMS supports the TimestampColumnName parameter in versions 3.1.4 and later.
*
* DMS includes an additional STRING column in the .csv or .parquet object files of your migrated data when you set
* TimestampColumnName to a nonblank value.
*
* For a full load, each row of this timestamp column contains a timestamp for when the data was transferred from
* the source to the target by DMS.
*
* For a change data capture (CDC) load, each row of the timestamp column contains the timestamp for the commit of
* that row in the source database.
*
* The string format for this timestamp column value is yyyy-MM-dd HH:mm:ss.SSSSSS. By default, the precision of
* this value is in microseconds. For a CDC load, the rounding of the precision depends on the commit timestamp
* supported by DMS for the source database.
*
* When the AddColumnName parameter is set to true, DMS also includes a name for the timestamp column that you set
* with TimestampColumnName.
*
* @param timestampColumnName
*        A value that, when nonblank, causes AWS DMS to add a column with timestamp information to the endpoint
*        data for an Amazon S3 target.
*
*        AWS DMS supports the TimestampColumnName parameter in versions 3.1.4 and later.
*
*        DMS includes an additional STRING column in the .csv or .parquet object files of your migrated data when
*        you set TimestampColumnName to a nonblank value.
*
*        For a full load, each row of this timestamp column contains a timestamp for when the data was transferred
*        from the source to the target by DMS.
*
*        For a change data capture (CDC) load, each row of the timestamp column contains the timestamp for the
*        commit of that row in the source database.
*
*        The string format for this timestamp column value is yyyy-MM-dd HH:mm:ss.SSSSSS. By default, the
*        precision of this value is in microseconds. For a CDC load, the rounding of the precision depends on the
*        commit timestamp supported by DMS for the source database.
*
*        When the AddColumnName parameter is set to true, DMS also includes a name for the timestamp column that
*        you set with TimestampColumnName.
*/
public void setTimestampColumnName(String timestampColumnName) {
this.timestampColumnName = timestampColumnName;
}
/**
*
* A value that, when nonblank, causes AWS DMS to add a column with timestamp information to the endpoint data for
* an Amazon S3 target.
*
* AWS DMS supports the TimestampColumnName parameter in versions 3.1.4 and later.
*
* DMS includes an additional STRING column in the .csv or .parquet object files of your migrated data when you set
* TimestampColumnName to a nonblank value.
*
* For a full load, each row of this timestamp column contains a timestamp for when the data was transferred from
* the source to the target by DMS.
*
* For a change data capture (CDC) load, each row of the timestamp column contains the timestamp for the commit of
* that row in the source database.
*
* The string format for this timestamp column value is yyyy-MM-dd HH:mm:ss.SSSSSS. By default, the precision of
* this value is in microseconds. For a CDC load, the rounding of the precision depends on the commit timestamp
* supported by DMS for the source database.
*
* When the AddColumnName parameter is set to true, DMS also includes a name for the timestamp column that you set
* with TimestampColumnName.
*
* @return A value that, when nonblank, causes AWS DMS to add a column with timestamp information to the endpoint
*         data for an Amazon S3 target.
*
*         AWS DMS supports the TimestampColumnName parameter in versions 3.1.4 and later.
*
*         DMS includes an additional STRING column in the .csv or .parquet object files of your migrated data when
*         you set TimestampColumnName to a nonblank value.
*
*         For a full load, each row of this timestamp column contains a timestamp for when the data was
*         transferred from the source to the target by DMS.
*
*         For a change data capture (CDC) load, each row of the timestamp column contains the timestamp for the
*         commit of that row in the source database.
*
*         The string format for this timestamp column value is yyyy-MM-dd HH:mm:ss.SSSSSS. By default, the
*         precision of this value is in microseconds. For a CDC load, the rounding of the precision depends on the
*         commit timestamp supported by DMS for the source database.
*
*         When the AddColumnName parameter is set to true, DMS also includes a name for the timestamp column that
*         you set with TimestampColumnName.
*/
public String getTimestampColumnName() {
return this.timestampColumnName;
}
/**
*
* A value that, when nonblank, causes AWS DMS to add a column with timestamp information to the endpoint data for
* an Amazon S3 target.
*
* AWS DMS supports the TimestampColumnName parameter in versions 3.1.4 and later.
*
* DMS includes an additional STRING column in the .csv or .parquet object files of your migrated data when you set
* TimestampColumnName to a nonblank value.
*
* For a full load, each row of this timestamp column contains a timestamp for when the data was transferred from
* the source to the target by DMS.
*
* For a change data capture (CDC) load, each row of the timestamp column contains the timestamp for the commit of
* that row in the source database.
*
* The string format for this timestamp column value is yyyy-MM-dd HH:mm:ss.SSSSSS. By default, the precision of
* this value is in microseconds. For a CDC load, the rounding of the precision depends on the commit timestamp
* supported by DMS for the source database.
*
* When the AddColumnName parameter is set to true, DMS also includes a name for the timestamp column that you set
* with TimestampColumnName.
*
* @param timestampColumnName
*        A value that, when nonblank, causes AWS DMS to add a column with timestamp information to the endpoint
*        data for an Amazon S3 target.
*
*        AWS DMS supports the TimestampColumnName parameter in versions 3.1.4 and later.
*
*        DMS includes an additional STRING column in the .csv or .parquet object files of your migrated data when
*        you set TimestampColumnName to a nonblank value.
*
*        For a full load, each row of this timestamp column contains a timestamp for when the data was transferred
*        from the source to the target by DMS.
*
*        For a change data capture (CDC) load, each row of the timestamp column contains the timestamp for the
*        commit of that row in the source database.
*
*        The string format for this timestamp column value is yyyy-MM-dd HH:mm:ss.SSSSSS. By default, the
*        precision of this value is in microseconds. For a CDC load, the rounding of the precision depends on the
*        commit timestamp supported by DMS for the source database.
*
*        When the AddColumnName parameter is set to true, DMS also includes a name for the timestamp column that
*        you set with TimestampColumnName.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public S3Settings withTimestampColumnName(String timestampColumnName) {
setTimestampColumnName(timestampColumnName);
return this;
}
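/*
 * Illustrative usage sketch (assumption): adding a transfer/commit timestamp column to every migrated row. The
 * column name "DMS_COMMIT_TS" is an arbitrary example, not a required value.
 *
 *   S3Settings timestamped = new S3Settings()
 *           .withTimestampColumnName("DMS_COMMIT_TS");
 */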
/**
*
* A value that specifies the precision of any TIMESTAMP column values that are written to an Amazon S3 object file
* in .parquet format.
*
* AWS DMS supports the ParquetTimestampInMillisecond parameter in versions 3.1.4 and later.
*
* When ParquetTimestampInMillisecond is set to true or y, AWS DMS writes all TIMESTAMP columns in a .parquet
* formatted file with millisecond precision. Otherwise, DMS writes them with microsecond precision.
*
* Currently, Amazon Athena and AWS Glue can handle only millisecond precision for TIMESTAMP values. Set this
* parameter to true for S3 endpoint object files that are .parquet formatted only if you plan to query or process
* the data with Athena or AWS Glue.
*
* AWS DMS writes any TIMESTAMP column values written to an S3 file in .csv format with microsecond precision.
*
* Setting ParquetTimestampInMillisecond has no effect on the string format of the timestamp column value that is
* inserted by setting the TimestampColumnName parameter.
*
* @param parquetTimestampInMillisecond
*        A value that specifies the precision of any TIMESTAMP column values that are written to an Amazon S3
*        object file in .parquet format.
*
*        AWS DMS supports the ParquetTimestampInMillisecond parameter in versions 3.1.4 and later.
*
*        When ParquetTimestampInMillisecond is set to true or y, AWS DMS writes all TIMESTAMP columns in a
*        .parquet formatted file with millisecond precision. Otherwise, DMS writes them with microsecond
*        precision.
*
*        Currently, Amazon Athena and AWS Glue can handle only millisecond precision for TIMESTAMP values. Set
*        this parameter to true for S3 endpoint object files that are .parquet formatted only if you plan to query
*        or process the data with Athena or AWS Glue.
*
*        AWS DMS writes any TIMESTAMP column values written to an S3 file in .csv format with microsecond
*        precision.
*
*        Setting ParquetTimestampInMillisecond has no effect on the string format of the timestamp column value
*        that is inserted by setting the TimestampColumnName parameter.
*/
public void setParquetTimestampInMillisecond(Boolean parquetTimestampInMillisecond) {
this.parquetTimestampInMillisecond = parquetTimestampInMillisecond;
}
/**
*
* A value that specifies the precision of any TIMESTAMP column values that are written to an Amazon S3 object file
* in .parquet format.
*
* AWS DMS supports the ParquetTimestampInMillisecond parameter in versions 3.1.4 and later.
*
* When ParquetTimestampInMillisecond is set to true or y, AWS DMS writes all TIMESTAMP columns in a .parquet
* formatted file with millisecond precision. Otherwise, DMS writes them with microsecond precision.
*
* Currently, Amazon Athena and AWS Glue can handle only millisecond precision for TIMESTAMP values. Set this
* parameter to true for S3 endpoint object files that are .parquet formatted only if you plan to query or process
* the data with Athena or AWS Glue.
*
* AWS DMS writes any TIMESTAMP column values written to an S3 file in .csv format with microsecond precision.
*
* Setting ParquetTimestampInMillisecond has no effect on the string format of the timestamp column value that is
* inserted by setting the TimestampColumnName parameter.
*
* @return A value that specifies the precision of any TIMESTAMP column values that are written to an Amazon S3
*         object file in .parquet format.
*
*         AWS DMS supports the ParquetTimestampInMillisecond parameter in versions 3.1.4 and later.
*
*         When ParquetTimestampInMillisecond is set to true or y, AWS DMS writes all TIMESTAMP columns in a
*         .parquet formatted file with millisecond precision. Otherwise, DMS writes them with microsecond
*         precision.
*
*         Currently, Amazon Athena and AWS Glue can handle only millisecond precision for TIMESTAMP values. Set
*         this parameter to true for S3 endpoint object files that are .parquet formatted only if you plan to
*         query or process the data with Athena or AWS Glue.
*
*         AWS DMS writes any TIMESTAMP column values written to an S3 file in .csv format with microsecond
*         precision.
*
*         Setting ParquetTimestampInMillisecond has no effect on the string format of the timestamp column value
*         that is inserted by setting the TimestampColumnName parameter.
*/
public Boolean getParquetTimestampInMillisecond() {
return this.parquetTimestampInMillisecond;
}
/**
*
* A value that specifies the precision of any TIMESTAMP column values that are written to an Amazon S3 object file
* in .parquet format.
*
* AWS DMS supports the ParquetTimestampInMillisecond parameter in versions 3.1.4 and later.
*
* When ParquetTimestampInMillisecond is set to true or y, AWS DMS writes all TIMESTAMP columns in a .parquet
* formatted file with millisecond precision. Otherwise, DMS writes them with microsecond precision.
*
* Currently, Amazon Athena and AWS Glue can handle only millisecond precision for TIMESTAMP values. Set this
* parameter to true for S3 endpoint object files that are .parquet formatted only if you plan to query or process
* the data with Athena or AWS Glue.
*
* AWS DMS writes any TIMESTAMP column values written to an S3 file in .csv format with microsecond precision.
*
* Setting ParquetTimestampInMillisecond has no effect on the string format of the timestamp column value that is
* inserted by setting the TimestampColumnName parameter.
*
* @param parquetTimestampInMillisecond
*        A value that specifies the precision of any TIMESTAMP column values that are written to an Amazon S3
*        object file in .parquet format.
*
*        AWS DMS supports the ParquetTimestampInMillisecond parameter in versions 3.1.4 and later.
*
*        When ParquetTimestampInMillisecond is set to true or y, AWS DMS writes all TIMESTAMP columns in a
*        .parquet formatted file with millisecond precision. Otherwise, DMS writes them with microsecond
*        precision.
*
*        Currently, Amazon Athena and AWS Glue can handle only millisecond precision for TIMESTAMP values. Set
*        this parameter to true for S3 endpoint object files that are .parquet formatted only if you plan to query
*        or process the data with Athena or AWS Glue.
*
*        AWS DMS writes any TIMESTAMP column values written to an S3 file in .csv format with microsecond
*        precision.
*
*        Setting ParquetTimestampInMillisecond has no effect on the string format of the timestamp column value
*        that is inserted by setting the TimestampColumnName parameter.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public S3Settings withParquetTimestampInMillisecond(Boolean parquetTimestampInMillisecond) {
setParquetTimestampInMillisecond(parquetTimestampInMillisecond);
return this;
}
/**
*
* A value that specifies the precision of any TIMESTAMP column values that are written to an Amazon S3 object file
* in .parquet format.
*
* AWS DMS supports the ParquetTimestampInMillisecond parameter in versions 3.1.4 and later.
*
* When ParquetTimestampInMillisecond is set to true or y, AWS DMS writes all TIMESTAMP columns in a .parquet
* formatted file with millisecond precision. Otherwise, DMS writes them with microsecond precision.
*
* Currently, Amazon Athena and AWS Glue can handle only millisecond precision for TIMESTAMP values. Set this
* parameter to true for S3 endpoint object files that are .parquet formatted only if you plan to query or process
* the data with Athena or AWS Glue.
*
* AWS DMS writes any TIMESTAMP column values written to an S3 file in .csv format with microsecond precision.
*
* Setting ParquetTimestampInMillisecond has no effect on the string format of the timestamp column value that is
* inserted by setting the TimestampColumnName parameter.
*
* @return A value that specifies the precision of any TIMESTAMP column values that are written to an Amazon S3
*         object file in .parquet format.
*
*         AWS DMS supports the ParquetTimestampInMillisecond parameter in versions 3.1.4 and later.
*
*         When ParquetTimestampInMillisecond is set to true or y, AWS DMS writes all TIMESTAMP columns in a
*         .parquet formatted file with millisecond precision. Otherwise, DMS writes them with microsecond
*         precision.
*
*         Currently, Amazon Athena and AWS Glue can handle only millisecond precision for TIMESTAMP values. Set
*         this parameter to true for S3 endpoint object files that are .parquet formatted only if you plan to
*         query or process the data with Athena or AWS Glue.
*
*         AWS DMS writes any TIMESTAMP column values written to an S3 file in .csv format with microsecond
*         precision.
*
*         Setting ParquetTimestampInMillisecond has no effect on the string format of the timestamp column value
*         that is inserted by setting the TimestampColumnName parameter.
*/
public Boolean isParquetTimestampInMillisecond() {
return this.parquetTimestampInMillisecond;
}
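/*
 * Illustrative usage sketch (assumption): writing .parquet TIMESTAMP columns with millisecond precision so that
 * Amazon Athena or AWS Glue can read them. The "parquet" DataFormat value is assumed for the example.
 *
 *   S3Settings athenaFriendly = new S3Settings()
 *           .withDataFormat("parquet")
 *           .withParquetTimestampInMillisecond(true);
 */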
/**
*
* A value that enables a change data capture (CDC) load to write INSERT and UPDATE operations to .csv or .parquet
* (columnar storage) output files. The default setting is false, but when CdcInsertsAndUpdates is set to true or y,
* INSERTs and UPDATEs from the source database are migrated to the .csv or .parquet file.
*
* For .csv file format only, how these INSERTs and UPDATEs are recorded depends on the value of the
* IncludeOpForFullLoad parameter. If IncludeOpForFullLoad is set to true, the first field of every CDC record is
* set to either I or U to indicate INSERT and UPDATE operations at the source. But if IncludeOpForFullLoad is set
* to false, CDC records are written without an indication of INSERT or UPDATE operations at the source. For more
* information about how these settings work together, see Indicating Source DB Operations in Migrated S3 Data in
* the AWS Database Migration Service User Guide.
*
* AWS DMS supports the use of the CdcInsertsAndUpdates parameter in versions 3.3.1 and later.
*
* CdcInsertsOnly and CdcInsertsAndUpdates can't both be set to true for the same endpoint. Set either
* CdcInsertsOnly or CdcInsertsAndUpdates to true for the same endpoint, but not both.
*
* @param cdcInsertsAndUpdates
*        A value that enables a change data capture (CDC) load to write INSERT and UPDATE operations to .csv or
*        .parquet (columnar storage) output files. The default setting is false, but when CdcInsertsAndUpdates is
*        set to true or y, INSERTs and UPDATEs from the source database are migrated to the .csv or .parquet file.
*
*        For .csv file format only, how these INSERTs and UPDATEs are recorded depends on the value of the
*        IncludeOpForFullLoad parameter. If IncludeOpForFullLoad is set to true, the first field of every CDC
*        record is set to either I or U to indicate INSERT and UPDATE operations at the source. But if
*        IncludeOpForFullLoad is set to false, CDC records are written without an indication of INSERT or UPDATE
*        operations at the source. For more information about how these settings work together, see Indicating
*        Source DB Operations in Migrated S3 Data in the AWS Database Migration Service User Guide.
*
*        AWS DMS supports the use of the CdcInsertsAndUpdates parameter in versions 3.3.1 and later.
*
*        CdcInsertsOnly and CdcInsertsAndUpdates can't both be set to true for the same endpoint. Set either
*        CdcInsertsOnly or CdcInsertsAndUpdates to true for the same endpoint, but not both.
*/
public void setCdcInsertsAndUpdates(Boolean cdcInsertsAndUpdates) {
this.cdcInsertsAndUpdates = cdcInsertsAndUpdates;
}
/**
*
* A value that enables a change data capture (CDC) load to write INSERT and UPDATE operations to .csv or .parquet
* (columnar storage) output files. The default setting is false, but when CdcInsertsAndUpdates is set to true or y,
* INSERTs and UPDATEs from the source database are migrated to the .csv or .parquet file.
*
* For .csv file format only, how these INSERTs and UPDATEs are recorded depends on the value of the
* IncludeOpForFullLoad parameter. If IncludeOpForFullLoad is set to true, the first field of every CDC record is
* set to either I or U to indicate INSERT and UPDATE operations at the source. But if IncludeOpForFullLoad is set
* to false, CDC records are written without an indication of INSERT or UPDATE operations at the source. For more
* information about how these settings work together, see Indicating Source DB Operations in Migrated S3 Data in
* the AWS Database Migration Service User Guide.
*
* AWS DMS supports the use of the CdcInsertsAndUpdates parameter in versions 3.3.1 and later.
*
* CdcInsertsOnly and CdcInsertsAndUpdates can't both be set to true for the same endpoint. Set either
* CdcInsertsOnly or CdcInsertsAndUpdates to true for the same endpoint, but not both.
*
* @return A value that enables a change data capture (CDC) load to write INSERT and UPDATE operations to .csv or
*         .parquet (columnar storage) output files. The default setting is false, but when CdcInsertsAndUpdates is
*         set to true or y, INSERTs and UPDATEs from the source database are migrated to the .csv or .parquet
*         file.
*
*         For .csv file format only, how these INSERTs and UPDATEs are recorded depends on the value of the
*         IncludeOpForFullLoad parameter. If IncludeOpForFullLoad is set to true, the first field of every CDC
*         record is set to either I or U to indicate INSERT and UPDATE operations at the source. But if
*         IncludeOpForFullLoad is set to false, CDC records are written without an indication of INSERT or UPDATE
*         operations at the source. For more information about how these settings work together, see Indicating
*         Source DB Operations in Migrated S3 Data in the AWS Database Migration Service User Guide.
*
*         AWS DMS supports the use of the CdcInsertsAndUpdates parameter in versions 3.3.1 and later.
*
*         CdcInsertsOnly and CdcInsertsAndUpdates can't both be set to true for the same endpoint. Set either
*         CdcInsertsOnly or CdcInsertsAndUpdates to true for the same endpoint, but not both.
*/
public Boolean getCdcInsertsAndUpdates() {
return this.cdcInsertsAndUpdates;
}
/**
*
* A value that enables a change data capture (CDC) load to write INSERT and UPDATE operations to .csv or .parquet
* (columnar storage) output files. The default setting is false, but when CdcInsertsAndUpdates is set to true or y,
* INSERTs and UPDATEs from the source database are migrated to the .csv or .parquet file.
*
* For .csv file format only, how these INSERTs and UPDATEs are recorded depends on the value of the
* IncludeOpForFullLoad parameter. If IncludeOpForFullLoad is set to true, the first field of every CDC record is
* set to either I or U to indicate INSERT and UPDATE operations at the source. But if IncludeOpForFullLoad is set
* to false, CDC records are written without an indication of INSERT or UPDATE operations at the source. For more
* information about how these settings work together, see Indicating Source DB Operations in Migrated S3 Data in
* the AWS Database Migration Service User Guide.
*
* AWS DMS supports the use of the CdcInsertsAndUpdates parameter in versions 3.3.1 and later.
*
* CdcInsertsOnly and CdcInsertsAndUpdates can't both be set to true for the same endpoint. Set either
* CdcInsertsOnly or CdcInsertsAndUpdates to true for the same endpoint, but not both.
*
* @param cdcInsertsAndUpdates
*        A value that enables a change data capture (CDC) load to write INSERT and UPDATE operations to .csv or
*        .parquet (columnar storage) output files. The default setting is false, but when CdcInsertsAndUpdates is
*        set to true or y, INSERTs and UPDATEs from the source database are migrated to the .csv or .parquet file.
*
*        For .csv file format only, how these INSERTs and UPDATEs are recorded depends on the value of the
*        IncludeOpForFullLoad parameter. If IncludeOpForFullLoad is set to true, the first field of every CDC
*        record is set to either I or U to indicate INSERT and UPDATE operations at the source. But if
*        IncludeOpForFullLoad is set to false, CDC records are written without an indication of INSERT or UPDATE
*        operations at the source. For more information about how these settings work together, see Indicating
*        Source DB Operations in Migrated S3 Data in the AWS Database Migration Service User Guide.
*
*        AWS DMS supports the use of the CdcInsertsAndUpdates parameter in versions 3.3.1 and later.
*
*        CdcInsertsOnly and CdcInsertsAndUpdates can't both be set to true for the same endpoint. Set either
*        CdcInsertsOnly or CdcInsertsAndUpdates to true for the same endpoint, but not both.
*
* @return Returns a reference to this object so that method calls can be chained together.
*/
public S3Settings withCdcInsertsAndUpdates(Boolean cdcInsertsAndUpdates) {
setCdcInsertsAndUpdates(cdcInsertsAndUpdates);
return this;
}
/**
*
* A value that enables a change data capture (CDC) load to write INSERT and UPDATE operations to .csv or .parquet
* (columnar storage) output files. The default setting is false, but when CdcInsertsAndUpdates is set to true or y,
* INSERTs and UPDATEs from the source database are migrated to the .csv or .parquet file.
*
* For .csv file format only, how these INSERTs and UPDATEs are recorded depends on the value of the
* IncludeOpForFullLoad parameter. If IncludeOpForFullLoad is set to true, the first field of every CDC record is
* set to either I or U to indicate INSERT and UPDATE operations at the source. But if IncludeOpForFullLoad is set
* to false, CDC records are written without an indication of INSERT or UPDATE operations at the source. For more
* information about how these settings work together, see Indicating Source DB Operations in Migrated S3 Data in
* the AWS Database Migration Service User Guide.
*
* AWS DMS supports the use of the CdcInsertsAndUpdates parameter in versions 3.3.1 and later.
*
* CdcInsertsOnly and CdcInsertsAndUpdates can't both be set to true for the same endpoint. Set either
* CdcInsertsOnly or CdcInsertsAndUpdates to true for the same endpoint, but not both.
*
* @return A value that enables a change data capture (CDC) load to write INSERT and UPDATE operations to .csv or
*         .parquet (columnar storage) output files. The default setting is false, but when CdcInsertsAndUpdates is
*         set to true or y, INSERTs and UPDATEs from the source database are migrated to the .csv or .parquet
*         file.
*
*         For .csv file format only, how these INSERTs and UPDATEs are recorded depends on the value of the
*         IncludeOpForFullLoad parameter. If IncludeOpForFullLoad is set to true, the first field of every CDC
*         record is set to either I or U to indicate INSERT and UPDATE operations at the source. But if
*         IncludeOpForFullLoad is set to false, CDC records are written without an indication of INSERT or UPDATE
*         operations at the source. For more information about how these settings work together, see Indicating
*         Source DB Operations in Migrated S3 Data in the AWS Database Migration Service User Guide.
*
*         AWS DMS supports the use of the CdcInsertsAndUpdates parameter in versions 3.3.1 and later.
*
*         CdcInsertsOnly and CdcInsertsAndUpdates can't both be set to true for the same endpoint. Set either
*         CdcInsertsOnly or CdcInsertsAndUpdates to true for the same endpoint, but not both.
*/
public Boolean isCdcInsertsAndUpdates() {
return this.cdcInsertsAndUpdates;
}
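/*
 * Illustrative usage sketch (assumption): capturing both INSERTs and UPDATEs during CDC. Because CdcInsertsOnly
 * and CdcInsertsAndUpdates are mutually exclusive, CdcInsertsOnly is left unset here.
 *
 *   S3Settings insertsAndUpdates = new S3Settings()
 *           .withIncludeOpForFullLoad(true)
 *           .withCdcInsertsAndUpdates(true);
 */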
/**
* Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be
* redacted from this string using a placeholder value.
*
* @return A string representation of this object.
*
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("{");
if (getServiceAccessRoleArn() != null)
sb.append("ServiceAccessRoleArn: ").append(getServiceAccessRoleArn()).append(",");
if (getExternalTableDefinition() != null)
sb.append("ExternalTableDefinition: ").append(getExternalTableDefinition()).append(",");
if (getCsvRowDelimiter() != null)
sb.append("CsvRowDelimiter: ").append(getCsvRowDelimiter()).append(",");
if (getCsvDelimiter() != null)
sb.append("CsvDelimiter: ").append(getCsvDelimiter()).append(",");
if (getBucketFolder() != null)
sb.append("BucketFolder: ").append(getBucketFolder()).append(",");
if (getBucketName() != null)
sb.append("BucketName: ").append(getBucketName()).append(",");
if (getCompressionType() != null)
sb.append("CompressionType: ").append(getCompressionType()).append(",");
if (getEncryptionMode() != null)
sb.append("EncryptionMode: ").append(getEncryptionMode()).append(",");
if (getServerSideEncryptionKmsKeyId() != null)
sb.append("ServerSideEncryptionKmsKeyId: ").append(getServerSideEncryptionKmsKeyId()).append(",");
if (getDataFormat() != null)
sb.append("DataFormat: ").append(getDataFormat()).append(",");
if (getEncodingType() != null)
sb.append("EncodingType: ").append(getEncodingType()).append(",");
if (getDictPageSizeLimit() != null)
sb.append("DictPageSizeLimit: ").append(getDictPageSizeLimit()).append(",");
if (getRowGroupLength() != null)
sb.append("RowGroupLength: ").append(getRowGroupLength()).append(",");
if (getDataPageSize() != null)
sb.append("DataPageSize: ").append(getDataPageSize()).append(",");
if (getParquetVersion() != null)
sb.append("ParquetVersion: ").append(getParquetVersion()).append(",");
if (getEnableStatistics() != null)
sb.append("EnableStatistics: ").append(getEnableStatistics()).append(",");
if (getIncludeOpForFullLoad() != null)
sb.append("IncludeOpForFullLoad: ").append(getIncludeOpForFullLoad()).append(",");
if (getCdcInsertsOnly() != null)
sb.append("CdcInsertsOnly: ").append(getCdcInsertsOnly()).append(",");
if (getTimestampColumnName() != null)
sb.append("TimestampColumnName: ").append(getTimestampColumnName()).append(",");
if (getParquetTimestampInMillisecond() != null)
sb.append("ParquetTimestampInMillisecond: ").append(getParquetTimestampInMillisecond()).append(",");
if (getCdcInsertsAndUpdates() != null)
sb.append("CdcInsertsAndUpdates: ").append(getCdcInsertsAndUpdates());
sb.append("}");
return sb.toString();
}
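/*
 * Illustrative usage sketch: the string form is handy for debug logging. Only non-null fields are appended, so an
 * object with a single populated field prints compactly and an empty object prints as "{}".
 *
 *   S3Settings settings = new S3Settings().withBucketName("my-dms-target-bucket");
 *   System.out.println(settings); // prints {BucketName: my-dms-target-bucket}
 */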
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (obj instanceof S3Settings == false)
return false;
S3Settings other = (S3Settings) obj;
if (other.getServiceAccessRoleArn() == null ^ this.getServiceAccessRoleArn() == null)
return false;
if (other.getServiceAccessRoleArn() != null && other.getServiceAccessRoleArn().equals(this.getServiceAccessRoleArn()) == false)
return false;
if (other.getExternalTableDefinition() == null ^ this.getExternalTableDefinition() == null)
return false;
if (other.getExternalTableDefinition() != null && other.getExternalTableDefinition().equals(this.getExternalTableDefinition()) == false)
return false;
if (other.getCsvRowDelimiter() == null ^ this.getCsvRowDelimiter() == null)
return false;
if (other.getCsvRowDelimiter() != null && other.getCsvRowDelimiter().equals(this.getCsvRowDelimiter()) == false)
return false;
if (other.getCsvDelimiter() == null ^ this.getCsvDelimiter() == null)
return false;
if (other.getCsvDelimiter() != null && other.getCsvDelimiter().equals(this.getCsvDelimiter()) == false)
return false;
if (other.getBucketFolder() == null ^ this.getBucketFolder() == null)
return false;
if (other.getBucketFolder() != null && other.getBucketFolder().equals(this.getBucketFolder()) == false)
return false;
if (other.getBucketName() == null ^ this.getBucketName() == null)
return false;
if (other.getBucketName() != null && other.getBucketName().equals(this.getBucketName()) == false)
return false;
if (other.getCompressionType() == null ^ this.getCompressionType() == null)
return false;
if (other.getCompressionType() != null && other.getCompressionType().equals(this.getCompressionType()) == false)
return false;
if (other.getEncryptionMode() == null ^ this.getEncryptionMode() == null)
return false;
if (other.getEncryptionMode() != null && other.getEncryptionMode().equals(this.getEncryptionMode()) == false)
return false;
if (other.getServerSideEncryptionKmsKeyId() == null ^ this.getServerSideEncryptionKmsKeyId() == null)
return false;
if (other.getServerSideEncryptionKmsKeyId() != null && other.getServerSideEncryptionKmsKeyId().equals(this.getServerSideEncryptionKmsKeyId()) == false)
return false;
if (other.getDataFormat() == null ^ this.getDataFormat() == null)
return false;
if (other.getDataFormat() != null && other.getDataFormat().equals(this.getDataFormat()) == false)
return false;
if (other.getEncodingType() == null ^ this.getEncodingType() == null)
return false;
if (other.getEncodingType() != null && other.getEncodingType().equals(this.getEncodingType()) == false)
return false;
if (other.getDictPageSizeLimit() == null ^ this.getDictPageSizeLimit() == null)
return false;
if (other.getDictPageSizeLimit() != null && other.getDictPageSizeLimit().equals(this.getDictPageSizeLimit()) == false)
return false;
if (other.getRowGroupLength() == null ^ this.getRowGroupLength() == null)
return false;
if (other.getRowGroupLength() != null && other.getRowGroupLength().equals(this.getRowGroupLength()) == false)
return false;
if (other.getDataPageSize() == null ^ this.getDataPageSize() == null)
return false;
if (other.getDataPageSize() != null && other.getDataPageSize().equals(this.getDataPageSize()) == false)
return false;
if (other.getParquetVersion() == null ^ this.getParquetVersion() == null)
return false;
if (other.getParquetVersion() != null && other.getParquetVersion().equals(this.getParquetVersion()) == false)
return false;
if (other.getEnableStatistics() == null ^ this.getEnableStatistics() == null)
return false;
if (other.getEnableStatistics() != null && other.getEnableStatistics().equals(this.getEnableStatistics()) == false)
return false;
if (other.getIncludeOpForFullLoad() == null ^ this.getIncludeOpForFullLoad() == null)
return false;
if (other.getIncludeOpForFullLoad() != null && other.getIncludeOpForFullLoad().equals(this.getIncludeOpForFullLoad()) == false)
return false;
if (other.getCdcInsertsOnly() == null ^ this.getCdcInsertsOnly() == null)
return false;
if (other.getCdcInsertsOnly() != null && other.getCdcInsertsOnly().equals(this.getCdcInsertsOnly()) == false)
return false;
if (other.getTimestampColumnName() == null ^ this.getTimestampColumnName() == null)
return false;
if (other.getTimestampColumnName() != null && other.getTimestampColumnName().equals(this.getTimestampColumnName()) == false)
return false;
if (other.getParquetTimestampInMillisecond() == null ^ this.getParquetTimestampInMillisecond() == null)
return false;
if (other.getParquetTimestampInMillisecond() != null
&& other.getParquetTimestampInMillisecond().equals(this.getParquetTimestampInMillisecond()) == false)
return false;
if (other.getCdcInsertsAndUpdates() == null ^ this.getCdcInsertsAndUpdates() == null)
return false;
if (other.getCdcInsertsAndUpdates() != null && other.getCdcInsertsAndUpdates().equals(this.getCdcInsertsAndUpdates()) == false)
return false;
return true;
}
@Override
public int hashCode() {
final int prime = 31;
int hashCode = 1;
hashCode = prime * hashCode + ((getServiceAccessRoleArn() == null) ? 0 : getServiceAccessRoleArn().hashCode());
hashCode = prime * hashCode + ((getExternalTableDefinition() == null) ? 0 : getExternalTableDefinition().hashCode());
hashCode = prime * hashCode + ((getCsvRowDelimiter() == null) ? 0 : getCsvRowDelimiter().hashCode());
hashCode = prime * hashCode + ((getCsvDelimiter() == null) ? 0 : getCsvDelimiter().hashCode());
hashCode = prime * hashCode + ((getBucketFolder() == null) ? 0 : getBucketFolder().hashCode());
hashCode = prime * hashCode + ((getBucketName() == null) ? 0 : getBucketName().hashCode());
hashCode = prime * hashCode + ((getCompressionType() == null) ? 0 : getCompressionType().hashCode());
hashCode = prime * hashCode + ((getEncryptionMode() == null) ? 0 : getEncryptionMode().hashCode());
hashCode = prime * hashCode + ((getServerSideEncryptionKmsKeyId() == null) ? 0 : getServerSideEncryptionKmsKeyId().hashCode());
hashCode = prime * hashCode + ((getDataFormat() == null) ? 0 : getDataFormat().hashCode());
hashCode = prime * hashCode + ((getEncodingType() == null) ? 0 : getEncodingType().hashCode());
hashCode = prime * hashCode + ((getDictPageSizeLimit() == null) ? 0 : getDictPageSizeLimit().hashCode());
hashCode = prime * hashCode + ((getRowGroupLength() == null) ? 0 : getRowGroupLength().hashCode());
hashCode = prime * hashCode + ((getDataPageSize() == null) ? 0 : getDataPageSize().hashCode());
hashCode = prime * hashCode + ((getParquetVersion() == null) ? 0 : getParquetVersion().hashCode());
hashCode = prime * hashCode + ((getEnableStatistics() == null) ? 0 : getEnableStatistics().hashCode());
hashCode = prime * hashCode + ((getIncludeOpForFullLoad() == null) ? 0 : getIncludeOpForFullLoad().hashCode());
hashCode = prime * hashCode + ((getCdcInsertsOnly() == null) ? 0 : getCdcInsertsOnly().hashCode());
hashCode = prime * hashCode + ((getTimestampColumnName() == null) ? 0 : getTimestampColumnName().hashCode());
hashCode = prime * hashCode + ((getParquetTimestampInMillisecond() == null) ? 0 : getParquetTimestampInMillisecond().hashCode());
hashCode = prime * hashCode + ((getCdcInsertsAndUpdates() == null) ? 0 : getCdcInsertsAndUpdates().hashCode());
return hashCode;
}
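/*
 * Illustrative usage sketch: equals() and hashCode() are value-based over all fields, so two independently built
 * instances with the same settings compare equal and collapse to a single entry in a hash-based collection.
 *
 *   S3Settings a = new S3Settings().withBucketName("bucket").withCdcInsertsOnly(true);
 *   S3Settings b = new S3Settings().withBucketName("bucket").withCdcInsertsOnly(true);
 *   boolean sameValue = a.equals(b) && a.hashCode() == b.hashCode(); // true
 */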
@Override
public S3Settings clone() {
try {
return (S3Settings) super.clone();
} catch (CloneNotSupportedException e) {
throw new IllegalStateException("Got a CloneNotSupportedException from Object.clone() " + "even though we're Cloneable!", e);
}
}
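/*
 * Illustrative usage sketch: clone() delegates to Object.clone(), producing a field-by-field copy that is equal
 * to, but a distinct instance from, the original.
 *
 *   S3Settings original = new S3Settings().withBucketFolder("changedata");
 *   S3Settings copy = original.clone();
 *   boolean independentButEqual = (copy != original) && copy.equals(original); // true
 */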
@com.amazonaws.annotation.SdkInternalApi
@Override
public void marshall(ProtocolMarshaller protocolMarshaller) {
com.amazonaws.services.databasemigrationservice.model.transform.S3SettingsMarshaller.getInstance().marshall(this, protocolMarshaller);
}
}