
/*
* Copyright 2014-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.databasemigrationservice.model;
import java.io.Serializable;
import javax.annotation.Generated;
import com.amazonaws.protocol.StructuredPojo;
import com.amazonaws.protocol.ProtocolMarshaller;
/**
* <p>
* Settings for exporting data to Amazon S3.
* </p>
*
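* <p>
* A minimal construction sketch using the fluent <code>with*</code> methods below; the role ARN and bucket names
* are hypothetical placeholders:
* </p>
*
* <pre>
* S3Settings s3Settings = new S3Settings()
*         .withServiceAccessRoleArn("arn:aws:iam::123456789012:role/dms-s3-access")
*         .withBucketName("my-dms-target-bucket")
*         .withBucketFolder("changedata")
*         .withCompressionType(CompressionTypeValue.GZIP)
*         .withDataFormat(DataFormatValue.CSV);
* </pre>
*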
* @see <a href="http://docs.aws.amazon.com/goto/WebAPI/dms-2016-01-01/S3Settings" target="_top">AWS API
*      Documentation</a>
*/
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class S3Settings implements Serializable, Cloneable, StructuredPojo {
/**
*
* The Amazon Resource Name (ARN) used by the service access IAM role.
*
*/
private String serviceAccessRoleArn;
/**
*
* The external table definition.
*
*/
private String externalTableDefinition;
/**
*
* The delimiter used to separate rows in the source files. The default is a carriage return (<code>\n</code>).
*
*/
private String csvRowDelimiter;
/**
*
* The delimiter used to separate columns in the source files. The default is a comma.
*
*/
private String csvDelimiter;
/**
*
* An optional parameter to set a folder name in the S3 bucket. If provided, tables are created in the path
* <code>&lt;bucketFolder&gt;/&lt;schema_name&gt;/&lt;table_name&gt;/</code>. If this parameter is not specified,
* then the path used is <code>&lt;schema_name&gt;/&lt;table_name&gt;/</code>.
*
*/
private String bucketFolder;
/**
*
* The name of the S3 bucket.
*
*/
private String bucketName;
/**
*
* An optional parameter to compress the target files. Set to GZIP to compress the target files, or to NONE (the
* default) to leave them uncompressed. Applies to both CSV and PARQUET data formats.
*
*/
private String compressionType;
/**
*
* The type of server-side encryption you want to use for your data. This is part of the endpoint settings or the
* extra connections attributes for Amazon S3. You can choose either <code>SSE_S3</code> (default) or
* <code>SSE_KMS</code>. To use <code>SSE_S3</code>, you need an IAM role with permission to allow
* <code>"arn:aws:s3:::dms-*"</code> to use the following actions:
* <ul>
* <li><code>s3:CreateBucket</code></li>
* <li><code>s3:ListBucket</code></li>
* <li><code>s3:DeleteBucket</code></li>
* <li><code>s3:GetBucketLocation</code></li>
* <li><code>s3:GetObject</code></li>
* <li><code>s3:PutObject</code></li>
* <li><code>s3:DeleteObject</code></li>
* <li><code>s3:GetObjectVersion</code></li>
* <li><code>s3:GetBucketPolicy</code></li>
* <li><code>s3:PutBucketPolicy</code></li>
* <li><code>s3:DeleteBucketPolicy</code></li>
* </ul>
*/
private String encryptionMode;
/**
*
* If you are using SSE_KMS for the <code>EncryptionMode</code>, provide the KMS Key ID. The key you use needs an
* attached policy that enables IAM user permissions and allows use of the key.
*
* Here is a CLI example:
* <code>aws dms create-endpoint --endpoint-identifier &lt;value&gt; --endpoint-type target --engine-name s3 --s3-settings ServiceAccessRoleArn=&lt;value&gt;,BucketFolder=&lt;value&gt;,BucketName=&lt;value&gt;,EncryptionMode=SSE_KMS,ServerSideEncryptionKmsKeyId=&lt;value&gt;</code>
*
*/
private String serverSideEncryptionKmsKeyId;
/**
*
* The format of the data that you want to use for output. You can choose one of the following:
* <ul>
* <li><code>CSV</code>: This is a row-based format with comma-separated values.</li>
* <li><code>PARQUET</code>: Apache Parquet is a columnar storage format that features efficient compression and
* provides faster query response.</li>
* </ul>
*/
private String dataFormat;
/**
*
* The type of encoding you are using: <code>RLE_DICTIONARY</code> (default), <code>PLAIN</code>, or
* <code>PLAIN_DICTIONARY</code>.
* <ul>
* <li><code>RLE_DICTIONARY</code> uses a combination of bit-packing and run-length encoding to store repeated
* values more efficiently.</li>
* <li><code>PLAIN</code> does not use encoding at all. Values are stored as they are.</li>
* <li><code>PLAIN_DICTIONARY</code> builds a dictionary of the values encountered in a given column. The
* dictionary is stored in a dictionary page for each column chunk.</li>
* </ul>
*/
private String encodingType;
/**
*
* The maximum size of an encoded dictionary page of a column. If the dictionary page exceeds this, this column is
* stored using an encoding type of <code>PLAIN</code>. Defaults to 1024 * 1024 bytes (1 MiB), the maximum size of
* a dictionary page before it reverts to <code>PLAIN</code> encoding. For <code>PARQUET</code> format only.
*
*/
private Integer dictPageSizeLimit;
/**
*
* The number of rows in a row group. A smaller row group size provides faster reads, but as the number of row
* groups grows, writes become slower. Defaults to 10,000 (ten thousand) rows. For <code>PARQUET</code> format
* only.
*
* If you choose a value larger than the maximum, <code>RowGroupLength</code> is set to the max row group length
* in bytes (64 * 1024 * 1024).
*
*/
private Integer rowGroupLength;
/**
*
* The size of one data page in bytes. Defaults to 1024 * 1024 bytes (1 MiB). For <code>PARQUET</code> format only.
*
*/
private Integer dataPageSize;
/**
*
* The version of Apache Parquet format you want to use: <code>PARQUET_1_0</code> (default) or
* <code>PARQUET_2_0</code>.
*
*/
private String parquetVersion;
/**
*
* Enables statistics for Parquet pages and rowGroups. Choose <code>TRUE</code> to enable statistics, choose
* <code>FALSE</code> to disable. Statistics include <code>NULL</code>, <code>DISTINCT</code>, <code>MAX</code>,
* and <code>MIN</code> values. Defaults to <code>TRUE</code>. For <code>PARQUET</code> format only.
*
*/
private Boolean enableStatistics;
/**
*
* Option to write only <code>INSERT</code> operations to the comma-separated value (CSV) output files. By default,
* the first field in a CSV record contains the letter <code>I</code> (insert), <code>U</code> (update), or
* <code>D</code> (delete) to indicate whether the row was inserted, updated, or deleted at the source database. If
* <code>cdcInsertsOnly</code> is set to true, then only <code>INSERT</code>s are recorded in the CSV file, without
* the <code>I</code> annotation on each line. Valid values are <code>TRUE</code> and <code>FALSE</code>.
*
*/
private Boolean cdcInsertsOnly;
/**
*
* The Amazon Resource Name (ARN) used by the service access IAM role.
*
*
* @param serviceAccessRoleArn
* The Amazon Resource Name (ARN) used by the service access IAM role.
*/
public void setServiceAccessRoleArn(String serviceAccessRoleArn) {
this.serviceAccessRoleArn = serviceAccessRoleArn;
}
/**
*
* The Amazon Resource Name (ARN) used by the service access IAM role.
*
*
* @return The Amazon Resource Name (ARN) used by the service access IAM role.
*/
public String getServiceAccessRoleArn() {
return this.serviceAccessRoleArn;
}
/**
*
* The Amazon Resource Name (ARN) used by the service access IAM role.
*
*
* @param serviceAccessRoleArn
* The Amazon Resource Name (ARN) used by the service access IAM role.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public S3Settings withServiceAccessRoleArn(String serviceAccessRoleArn) {
setServiceAccessRoleArn(serviceAccessRoleArn);
return this;
}
/**
*
* The external table definition.
*
*
* @param externalTableDefinition
* The external table definition.
*/
public void setExternalTableDefinition(String externalTableDefinition) {
this.externalTableDefinition = externalTableDefinition;
}
/**
*
* The external table definition.
*
*
* @return The external table definition.
*/
public String getExternalTableDefinition() {
return this.externalTableDefinition;
}
/**
*
* The external table definition.
*
*
* @param externalTableDefinition
* The external table definition.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public S3Settings withExternalTableDefinition(String externalTableDefinition) {
setExternalTableDefinition(externalTableDefinition);
return this;
}
/**
*
* The delimiter used to separate rows in the source files. The default is a carriage return (<code>\n</code>).
*
*
* @param csvRowDelimiter
* The delimiter used to separate rows in the source files. The default is a carriage return (<code>\n</code>).
*/
public void setCsvRowDelimiter(String csvRowDelimiter) {
this.csvRowDelimiter = csvRowDelimiter;
}
/**
*
* The delimiter used to separate rows in the source files. The default is a carriage return (<code>\n</code>).
*
*
* @return The delimiter used to separate rows in the source files. The default is a carriage return
* (<code>\n</code>).
*/
public String getCsvRowDelimiter() {
return this.csvRowDelimiter;
}
/**
*
* The delimiter used to separate rows in the source files. The default is a carriage return (<code>\n</code>).
*
*
* @param csvRowDelimiter
* The delimiter used to separate rows in the source files. The default is a carriage return (<code>\n</code>).
* @return Returns a reference to this object so that method calls can be chained together.
*/
public S3Settings withCsvRowDelimiter(String csvRowDelimiter) {
setCsvRowDelimiter(csvRowDelimiter);
return this;
}
/**
*
* The delimiter used to separate columns in the source files. The default is a comma.
*
*
* @param csvDelimiter
* The delimiter used to separate columns in the source files. The default is a comma.
*/
public void setCsvDelimiter(String csvDelimiter) {
this.csvDelimiter = csvDelimiter;
}
/**
*
* The delimiter used to separate columns in the source files. The default is a comma.
*
*
* @return The delimiter used to separate columns in the source files. The default is a comma.
*/
public String getCsvDelimiter() {
return this.csvDelimiter;
}
/**
*
* The delimiter used to separate columns in the source files. The default is a comma.
*
*
* @param csvDelimiter
* The delimiter used to separate columns in the source files. The default is a comma.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public S3Settings withCsvDelimiter(String csvDelimiter) {
setCsvDelimiter(csvDelimiter);
return this;
}
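// Illustrative sketch (not part of the generated model): overriding both CSV delimiters, for example to
// produce pipe-delimited files with Windows-style row endings. Both delimiter choices below are arbitrary
// examples.
//
//     S3Settings csvSettings = new S3Settings()
//             .withCsvDelimiter("|")
//             .withCsvRowDelimiter("\r\n");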
/**
*
* An optional parameter to set a folder name in the S3 bucket. If provided, tables are created in the path
* <code>&lt;bucketFolder&gt;/&lt;schema_name&gt;/&lt;table_name&gt;/</code>. If this parameter is not specified,
* then the path used is <code>&lt;schema_name&gt;/&lt;table_name&gt;/</code>.
*
*
* @param bucketFolder
* An optional parameter to set a folder name in the S3 bucket. If provided, tables are created in the path
* <code>&lt;bucketFolder&gt;/&lt;schema_name&gt;/&lt;table_name&gt;/</code>. If this parameter is not specified,
* then the path used is <code>&lt;schema_name&gt;/&lt;table_name&gt;/</code>.
*/
public void setBucketFolder(String bucketFolder) {
this.bucketFolder = bucketFolder;
}
/**
*
* An optional parameter to set a folder name in the S3 bucket. If provided, tables are created in the path
* <code>&lt;bucketFolder&gt;/&lt;schema_name&gt;/&lt;table_name&gt;/</code>. If this parameter is not specified,
* then the path used is <code>&lt;schema_name&gt;/&lt;table_name&gt;/</code>.
*
*
* @return An optional parameter to set a folder name in the S3 bucket. If provided, tables are created in the path
* <code>&lt;bucketFolder&gt;/&lt;schema_name&gt;/&lt;table_name&gt;/</code>. If this parameter is not specified,
* then the path used is <code>&lt;schema_name&gt;/&lt;table_name&gt;/</code>.
*/
public String getBucketFolder() {
return this.bucketFolder;
}
/**
*
* An optional parameter to set a folder name in the S3 bucket. If provided, tables are created in the path
* <code>&lt;bucketFolder&gt;/&lt;schema_name&gt;/&lt;table_name&gt;/</code>. If this parameter is not specified,
* then the path used is <code>&lt;schema_name&gt;/&lt;table_name&gt;/</code>.
*
*
* @param bucketFolder
* An optional parameter to set a folder name in the S3 bucket. If provided, tables are created in the path
* <code>&lt;bucketFolder&gt;/&lt;schema_name&gt;/&lt;table_name&gt;/</code>. If this parameter is not specified,
* then the path used is <code>&lt;schema_name&gt;/&lt;table_name&gt;/</code>.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public S3Settings withBucketFolder(String bucketFolder) {
setBucketFolder(bucketFolder);
return this;
}
/**
*
* The name of the S3 bucket.
*
*
* @param bucketName
* The name of the S3 bucket.
*/
public void setBucketName(String bucketName) {
this.bucketName = bucketName;
}
/**
*
* The name of the S3 bucket.
*
*
* @return The name of the S3 bucket.
*/
public String getBucketName() {
return this.bucketName;
}
/**
*
* The name of the S3 bucket.
*
*
* @param bucketName
* The name of the S3 bucket.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public S3Settings withBucketName(String bucketName) {
setBucketName(bucketName);
return this;
}
/**
*
* An optional parameter to compress the target files. Set to GZIP to compress the target files, or to NONE (the
* default) to leave them uncompressed. Applies to both CSV and PARQUET data formats.
*
*
* @param compressionType
* An optional parameter to compress the target files. Set to GZIP to compress the target files, or to NONE (the
* default) to leave them uncompressed. Applies to both CSV and PARQUET data formats.
* @see CompressionTypeValue
*/
public void setCompressionType(String compressionType) {
this.compressionType = compressionType;
}
/**
*
* An optional parameter to compress the target files. Set to GZIP to compress the target files, or to NONE (the
* default) to leave them uncompressed. Applies to both CSV and PARQUET data formats.
*
*
* @return An optional parameter to compress the target files. Set to GZIP to compress the target files, or to
* NONE (the default) to leave them uncompressed. Applies to both CSV and PARQUET data formats.
* @see CompressionTypeValue
*/
public String getCompressionType() {
return this.compressionType;
}
/**
*
* An optional parameter to compress the target files. Set to GZIP to compress the target files, or to NONE (the
* default) to leave them uncompressed. Applies to both CSV and PARQUET data formats.
*
*
* @param compressionType
* An optional parameter to compress the target files. Set to GZIP to compress the target files, or to NONE (the
* default) to leave them uncompressed. Applies to both CSV and PARQUET data formats.
* @return Returns a reference to this object so that method calls can be chained together.
* @see CompressionTypeValue
*/
public S3Settings withCompressionType(String compressionType) {
setCompressionType(compressionType);
return this;
}
/**
*
* An optional parameter to compress the target files. Set to GZIP to compress the target files, or to NONE (the
* default) to leave them uncompressed. Applies to both CSV and PARQUET data formats.
*
*
* @param compressionType
* An optional parameter to compress the target files. Set to GZIP to compress the target files, or to NONE (the
* default) to leave them uncompressed. Applies to both CSV and PARQUET data formats.
* @see CompressionTypeValue
*/
public void setCompressionType(CompressionTypeValue compressionType) {
withCompressionType(compressionType);
}
/**
*
* An optional parameter to compress the target files. Set to GZIP to compress the target files, or to NONE (the
* default) to leave them uncompressed. Applies to both CSV and PARQUET data formats.
*
*
* @param compressionType
* An optional parameter to compress the target files. Set to GZIP to compress the target files, or to NONE (the
* default) to leave them uncompressed. Applies to both CSV and PARQUET data formats.
* @return Returns a reference to this object so that method calls can be chained together.
* @see CompressionTypeValue
*/
public S3Settings withCompressionType(CompressionTypeValue compressionType) {
this.compressionType = compressionType.toString();
return this;
}
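// Illustrative sketch (not part of the generated model): the typed overload above stores
// compressionType.toString(), so the two calls below leave the object in an identical state.
//
//     S3Settings viaEnum = new S3Settings().withCompressionType(CompressionTypeValue.GZIP);
//     S3Settings viaString = new S3Settings().withCompressionType(CompressionTypeValue.GZIP.toString());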
/**
*
* The type of server-side encryption you want to use for your data. This is part of the endpoint settings or the
* extra connections attributes for Amazon S3. You can choose either <code>SSE_S3</code> (default) or
* <code>SSE_KMS</code>. To use <code>SSE_S3</code>, you need an IAM role with permission to allow
* <code>"arn:aws:s3:::dms-*"</code> to use the following actions:
* <ul>
* <li><code>s3:CreateBucket</code></li>
* <li><code>s3:ListBucket</code></li>
* <li><code>s3:DeleteBucket</code></li>
* <li><code>s3:GetBucketLocation</code></li>
* <li><code>s3:GetObject</code></li>
* <li><code>s3:PutObject</code></li>
* <li><code>s3:DeleteObject</code></li>
* <li><code>s3:GetObjectVersion</code></li>
* <li><code>s3:GetBucketPolicy</code></li>
* <li><code>s3:PutBucketPolicy</code></li>
* <li><code>s3:DeleteBucketPolicy</code></li>
* </ul>
*
* @param encryptionMode
* The type of server-side encryption you want to use for your data. This is part of the endpoint settings or the
* extra connections attributes for Amazon S3. You can choose either <code>SSE_S3</code> (default) or
* <code>SSE_KMS</code>. To use <code>SSE_S3</code>, you need an IAM role with permission to allow
* <code>"arn:aws:s3:::dms-*"</code> to use the actions listed above.
* @see EncryptionModeValue
*/
public void setEncryptionMode(String encryptionMode) {
this.encryptionMode = encryptionMode;
}
/**
*
* The type of server-side encryption you want to use for your data. This is part of the endpoint settings or the
* extra connections attributes for Amazon S3. You can choose either <code>SSE_S3</code> (default) or
* <code>SSE_KMS</code>. To use <code>SSE_S3</code>, you need an IAM role with permission to allow
* <code>"arn:aws:s3:::dms-*"</code> to use the following actions:
* <ul>
* <li><code>s3:CreateBucket</code></li>
* <li><code>s3:ListBucket</code></li>
* <li><code>s3:DeleteBucket</code></li>
* <li><code>s3:GetBucketLocation</code></li>
* <li><code>s3:GetObject</code></li>
* <li><code>s3:PutObject</code></li>
* <li><code>s3:DeleteObject</code></li>
* <li><code>s3:GetObjectVersion</code></li>
* <li><code>s3:GetBucketPolicy</code></li>
* <li><code>s3:PutBucketPolicy</code></li>
* <li><code>s3:DeleteBucketPolicy</code></li>
* </ul>
*
* @return The type of server-side encryption you want to use for your data. This is part of the endpoint settings
* or the extra connections attributes for Amazon S3. You can choose either <code>SSE_S3</code> (default) or
* <code>SSE_KMS</code>. To use <code>SSE_S3</code>, you need an IAM role with permission to allow
* <code>"arn:aws:s3:::dms-*"</code> to use the actions listed above.
* @see EncryptionModeValue
*/
public String getEncryptionMode() {
return this.encryptionMode;
}
/**
*
* The type of server-side encryption you want to use for your data. This is part of the endpoint settings or the
* extra connections attributes for Amazon S3. You can choose either <code>SSE_S3</code> (default) or
* <code>SSE_KMS</code>. To use <code>SSE_S3</code>, you need an IAM role with permission to allow
* <code>"arn:aws:s3:::dms-*"</code> to use the following actions:
* <ul>
* <li><code>s3:CreateBucket</code></li>
* <li><code>s3:ListBucket</code></li>
* <li><code>s3:DeleteBucket</code></li>
* <li><code>s3:GetBucketLocation</code></li>
* <li><code>s3:GetObject</code></li>
* <li><code>s3:PutObject</code></li>
* <li><code>s3:DeleteObject</code></li>
* <li><code>s3:GetObjectVersion</code></li>
* <li><code>s3:GetBucketPolicy</code></li>
* <li><code>s3:PutBucketPolicy</code></li>
* <li><code>s3:DeleteBucketPolicy</code></li>
* </ul>
*
* @param encryptionMode
* The type of server-side encryption you want to use for your data. This is part of the endpoint settings or the
* extra connections attributes for Amazon S3. You can choose either <code>SSE_S3</code> (default) or
* <code>SSE_KMS</code>. To use <code>SSE_S3</code>, you need an IAM role with permission to allow
* <code>"arn:aws:s3:::dms-*"</code> to use the actions listed above.
* @return Returns a reference to this object so that method calls can be chained together.
* @see EncryptionModeValue
*/
public S3Settings withEncryptionMode(String encryptionMode) {
setEncryptionMode(encryptionMode);
return this;
}
/**
*
* The type of server-side encryption you want to use for your data. This is part of the endpoint settings or the
* extra connections attributes for Amazon S3. You can choose either <code>SSE_S3</code> (default) or
* <code>SSE_KMS</code>. To use <code>SSE_S3</code>, you need an IAM role with permission to allow
* <code>"arn:aws:s3:::dms-*"</code> to use the following actions:
* <ul>
* <li><code>s3:CreateBucket</code></li>
* <li><code>s3:ListBucket</code></li>
* <li><code>s3:DeleteBucket</code></li>
* <li><code>s3:GetBucketLocation</code></li>
* <li><code>s3:GetObject</code></li>
* <li><code>s3:PutObject</code></li>
* <li><code>s3:DeleteObject</code></li>
* <li><code>s3:GetObjectVersion</code></li>
* <li><code>s3:GetBucketPolicy</code></li>
* <li><code>s3:PutBucketPolicy</code></li>
* <li><code>s3:DeleteBucketPolicy</code></li>
* </ul>
*
* @param encryptionMode
* The type of server-side encryption you want to use for your data. This is part of the endpoint settings or the
* extra connections attributes for Amazon S3. You can choose either <code>SSE_S3</code> (default) or
* <code>SSE_KMS</code>. To use <code>SSE_S3</code>, you need an IAM role with permission to allow
* <code>"arn:aws:s3:::dms-*"</code> to use the actions listed above.
* @see EncryptionModeValue
*/
public void setEncryptionMode(EncryptionModeValue encryptionMode) {
withEncryptionMode(encryptionMode);
}
/**
*
* The type of server-side encryption you want to use for your data. This is part of the endpoint settings or the
* extra connections attributes for Amazon S3. You can choose either <code>SSE_S3</code> (default) or
* <code>SSE_KMS</code>. To use <code>SSE_S3</code>, you need an IAM role with permission to allow
* <code>"arn:aws:s3:::dms-*"</code> to use the following actions:
* <ul>
* <li><code>s3:CreateBucket</code></li>
* <li><code>s3:ListBucket</code></li>
* <li><code>s3:DeleteBucket</code></li>
* <li><code>s3:GetBucketLocation</code></li>
* <li><code>s3:GetObject</code></li>
* <li><code>s3:PutObject</code></li>
* <li><code>s3:DeleteObject</code></li>
* <li><code>s3:GetObjectVersion</code></li>
* <li><code>s3:GetBucketPolicy</code></li>
* <li><code>s3:PutBucketPolicy</code></li>
* <li><code>s3:DeleteBucketPolicy</code></li>
* </ul>
*
* @param encryptionMode
* The type of server-side encryption you want to use for your data. This is part of the endpoint settings or the
* extra connections attributes for Amazon S3. You can choose either <code>SSE_S3</code> (default) or
* <code>SSE_KMS</code>. To use <code>SSE_S3</code>, you need an IAM role with permission to allow
* <code>"arn:aws:s3:::dms-*"</code> to use the actions listed above.
* @return Returns a reference to this object so that method calls can be chained together.
* @see EncryptionModeValue
*/
public S3Settings withEncryptionMode(EncryptionModeValue encryptionMode) {
this.encryptionMode = encryptionMode.toString();
return this;
}
/**
*
* If you are using SSE_KMS for the <code>EncryptionMode</code>, provide the KMS Key ID. The key you use needs an
* attached policy that enables IAM user permissions and allows use of the key.
*
* Here is a CLI example:
* <code>aws dms create-endpoint --endpoint-identifier &lt;value&gt; --endpoint-type target --engine-name s3 --s3-settings ServiceAccessRoleArn=&lt;value&gt;,BucketFolder=&lt;value&gt;,BucketName=&lt;value&gt;,EncryptionMode=SSE_KMS,ServerSideEncryptionKmsKeyId=&lt;value&gt;</code>
*
*
* @param serverSideEncryptionKmsKeyId
* If you are using SSE_KMS for the <code>EncryptionMode</code>, provide the KMS Key ID. The key you use needs an
* attached policy that enables IAM user permissions and allows use of the key. See the CLI example above.
*/
public void setServerSideEncryptionKmsKeyId(String serverSideEncryptionKmsKeyId) {
this.serverSideEncryptionKmsKeyId = serverSideEncryptionKmsKeyId;
}
/**
*
* If you are using SSE_KMS for the <code>EncryptionMode</code>, provide the KMS Key ID. The key you use needs an
* attached policy that enables IAM user permissions and allows use of the key.
*
* Here is a CLI example:
* <code>aws dms create-endpoint --endpoint-identifier &lt;value&gt; --endpoint-type target --engine-name s3 --s3-settings ServiceAccessRoleArn=&lt;value&gt;,BucketFolder=&lt;value&gt;,BucketName=&lt;value&gt;,EncryptionMode=SSE_KMS,ServerSideEncryptionKmsKeyId=&lt;value&gt;</code>
*
*
* @return If you are using SSE_KMS for the <code>EncryptionMode</code>, provide the KMS Key ID. The key you use
* needs an attached policy that enables IAM user permissions and allows use of the key. See the CLI example above.
*/
public String getServerSideEncryptionKmsKeyId() {
return this.serverSideEncryptionKmsKeyId;
}
/**
*
* If you are using SSE_KMS for the <code>EncryptionMode</code>, provide the KMS Key ID. The key you use needs an
* attached policy that enables IAM user permissions and allows use of the key.
*
* Here is a CLI example:
* <code>aws dms create-endpoint --endpoint-identifier &lt;value&gt; --endpoint-type target --engine-name s3 --s3-settings ServiceAccessRoleArn=&lt;value&gt;,BucketFolder=&lt;value&gt;,BucketName=&lt;value&gt;,EncryptionMode=SSE_KMS,ServerSideEncryptionKmsKeyId=&lt;value&gt;</code>
*
*
* @param serverSideEncryptionKmsKeyId
* If you are using SSE_KMS for the <code>EncryptionMode</code>, provide the KMS Key ID. The key you use needs an
* attached policy that enables IAM user permissions and allows use of the key. See the CLI example above.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public S3Settings withServerSideEncryptionKmsKeyId(String serverSideEncryptionKmsKeyId) {
setServerSideEncryptionKmsKeyId(serverSideEncryptionKmsKeyId);
return this;
}
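// Illustrative sketch (hypothetical key ARN): pairing SSE_KMS encryption with its key ID, mirroring the CLI
// example in the Javadoc above.
//
//     S3Settings encrypted = new S3Settings()
//             .withEncryptionMode(EncryptionModeValue.SSE_KMS)
//             .withServerSideEncryptionKmsKeyId("arn:aws:kms:us-east-1:123456789012:key/1234abcd-12ab-34cd-56ef-1234567890ab");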
/**
*
* The format of the data that you want to use for output. You can choose one of the following:
* <ul>
* <li><code>CSV</code>: This is a row-based format with comma-separated values.</li>
* <li><code>PARQUET</code>: Apache Parquet is a columnar storage format that features efficient compression and
* provides faster query response.</li>
* </ul>
*
* @param dataFormat
* The format of the data that you want to use for output: <code>CSV</code> (row-based, comma-separated values) or
* <code>PARQUET</code> (columnar storage), as described above.
* @see DataFormatValue
*/
public void setDataFormat(String dataFormat) {
this.dataFormat = dataFormat;
}
/**
*
* The format of the data that you want to use for output. You can choose one of the following:
* <ul>
* <li><code>CSV</code>: This is a row-based format with comma-separated values.</li>
* <li><code>PARQUET</code>: Apache Parquet is a columnar storage format that features efficient compression and
* provides faster query response.</li>
* </ul>
*
* @return The format of the data that you want to use for output: <code>CSV</code> (row-based, comma-separated
* values) or <code>PARQUET</code> (columnar storage), as described above.
* @see DataFormatValue
*/
public String getDataFormat() {
return this.dataFormat;
}
/**
*
* The format of the data that you want to use for output. You can choose one of the following:
* <ul>
* <li><code>CSV</code>: This is a row-based format with comma-separated values.</li>
* <li><code>PARQUET</code>: Apache Parquet is a columnar storage format that features efficient compression and
* provides faster query response.</li>
* </ul>
*
* @param dataFormat
* The format of the data that you want to use for output: <code>CSV</code> (row-based, comma-separated values) or
* <code>PARQUET</code> (columnar storage), as described above.
* @return Returns a reference to this object so that method calls can be chained together.
* @see DataFormatValue
*/
public S3Settings withDataFormat(String dataFormat) {
setDataFormat(dataFormat);
return this;
}
/**
*
* The format of the data that you want to use for output. You can choose one of the following:
* <ul>
* <li><code>CSV</code>: This is a row-based format with comma-separated values.</li>
* <li><code>PARQUET</code>: Apache Parquet is a columnar storage format that features efficient compression and
* provides faster query response.</li>
* </ul>
*
* @param dataFormat
* The format of the data that you want to use for output: <code>CSV</code> (row-based, comma-separated values) or
* <code>PARQUET</code> (columnar storage), as described above.
* @see DataFormatValue
*/
public void setDataFormat(DataFormatValue dataFormat) {
withDataFormat(dataFormat);
}
/**
*
* The format of the data that you want to use for output. You can choose one of the following:
* <ul>
* <li><code>CSV</code>: This is a row-based format with comma-separated values.</li>
* <li><code>PARQUET</code>: Apache Parquet is a columnar storage format that features efficient compression and
* provides faster query response.</li>
* </ul>
*
* @param dataFormat
* The format of the data that you want to use for output: <code>CSV</code> (row-based, comma-separated values) or
* <code>PARQUET</code> (columnar storage), as described above.
* @return Returns a reference to this object so that method calls can be chained together.
* @see DataFormatValue
*/
public S3Settings withDataFormat(DataFormatValue dataFormat) {
this.dataFormat = dataFormat.toString();
return this;
}
/**
*
* The type of encoding you are using: <code>RLE_DICTIONARY</code> (default), <code>PLAIN</code>, or
* <code>PLAIN_DICTIONARY</code>.
* <ul>
* <li><code>RLE_DICTIONARY</code> uses a combination of bit-packing and run-length encoding to store repeated
* values more efficiently.</li>
* <li><code>PLAIN</code> does not use encoding at all. Values are stored as they are.</li>
* <li><code>PLAIN_DICTIONARY</code> builds a dictionary of the values encountered in a given column. The
* dictionary is stored in a dictionary page for each column chunk.</li>
* </ul>
*
* @param encodingType
* The type of encoding you are using: <code>RLE_DICTIONARY</code> (default), <code>PLAIN</code>, or
* <code>PLAIN_DICTIONARY</code>, as described above.
* @see EncodingTypeValue
*/
public void setEncodingType(String encodingType) {
this.encodingType = encodingType;
}
/**
*
* The type of encoding you are using: <code>RLE_DICTIONARY</code> (default), <code>PLAIN</code>, or
* <code>PLAIN_DICTIONARY</code>.
* <ul>
* <li><code>RLE_DICTIONARY</code> uses a combination of bit-packing and run-length encoding to store repeated
* values more efficiently.</li>
* <li><code>PLAIN</code> does not use encoding at all. Values are stored as they are.</li>
* <li><code>PLAIN_DICTIONARY</code> builds a dictionary of the values encountered in a given column. The
* dictionary is stored in a dictionary page for each column chunk.</li>
* </ul>
*
* @return The type of encoding you are using: <code>RLE_DICTIONARY</code> (default), <code>PLAIN</code>, or
* <code>PLAIN_DICTIONARY</code>, as described above.
* @see EncodingTypeValue
*/
public String getEncodingType() {
return this.encodingType;
}
/**
*
* The type of encoding you are using: <code>RLE_DICTIONARY</code> (default), <code>PLAIN</code>, or
* <code>PLAIN_DICTIONARY</code>.
* <ul>
* <li><code>RLE_DICTIONARY</code> uses a combination of bit-packing and run-length encoding to store repeated
* values more efficiently.</li>
* <li><code>PLAIN</code> does not use encoding at all. Values are stored as they are.</li>
* <li><code>PLAIN_DICTIONARY</code> builds a dictionary of the values encountered in a given column. The
* dictionary is stored in a dictionary page for each column chunk.</li>
* </ul>
*
* @param encodingType
* The type of encoding you are using: <code>RLE_DICTIONARY</code> (default), <code>PLAIN</code>, or
* <code>PLAIN_DICTIONARY</code>, as described above.
* @return Returns a reference to this object so that method calls can be chained together.
* @see EncodingTypeValue
*/
public S3Settings withEncodingType(String encodingType) {
setEncodingType(encodingType);
return this;
}
/**
*
* The type of encoding you are using: <code>RLE_DICTIONARY</code> (default), <code>PLAIN</code>, or
* <code>PLAIN_DICTIONARY</code>.
* <ul>
* <li><code>RLE_DICTIONARY</code> uses a combination of bit-packing and run-length encoding to store repeated
* values more efficiently.</li>
* <li><code>PLAIN</code> does not use encoding at all. Values are stored as they are.</li>
* <li><code>PLAIN_DICTIONARY</code> builds a dictionary of the values encountered in a given column. The
* dictionary is stored in a dictionary page for each column chunk.</li>
* </ul>
*
* @param encodingType
* The type of encoding you are using: <code>RLE_DICTIONARY</code> (default), <code>PLAIN</code>, or
* <code>PLAIN_DICTIONARY</code>, as described above.
* @see EncodingTypeValue
*/
public void setEncodingType(EncodingTypeValue encodingType) {
withEncodingType(encodingType);
}
/**
*
* The type of encoding you are using: <code>RLE_DICTIONARY</code> (default), <code>PLAIN</code>, or
* <code>PLAIN_DICTIONARY</code>.
* <ul>
* <li><code>RLE_DICTIONARY</code> uses a combination of bit-packing and run-length encoding to store repeated
* values more efficiently.</li>
* <li><code>PLAIN</code> does not use encoding at all. Values are stored as they are.</li>
* <li><code>PLAIN_DICTIONARY</code> builds a dictionary of the values encountered in a given column. The
* dictionary is stored in a dictionary page for each column chunk.</li>
* </ul>
*
* @param encodingType
* The type of encoding you are using: <code>RLE_DICTIONARY</code> (default), <code>PLAIN</code>, or
* <code>PLAIN_DICTIONARY</code>, as described above.
* @return Returns a reference to this object so that method calls can be chained together.
* @see EncodingTypeValue
*/
public S3Settings withEncodingType(EncodingTypeValue encodingType) {
this.encodingType = encodingType.toString();
return this;
}
/**
*
* The maximum size of an encoded dictionary page of a column. If the dictionary page exceeds this, this column is
* stored using an encoding type of <code>PLAIN</code>. Defaults to 1024 * 1024 bytes (1 MiB), the maximum size of
* a dictionary page before it reverts to <code>PLAIN</code> encoding. For <code>PARQUET</code> format only.
*
*
* @param dictPageSizeLimit
* The maximum size of an encoded dictionary page of a column. If the dictionary page exceeds this, this column is
* stored using an encoding type of <code>PLAIN</code>. Defaults to 1024 * 1024 bytes (1 MiB). For
* <code>PARQUET</code> format only.
*/
public void setDictPageSizeLimit(Integer dictPageSizeLimit) {
this.dictPageSizeLimit = dictPageSizeLimit;
}
/**
*
* The maximum size of an encoded dictionary page of a column. If the dictionary page exceeds this, this column is
* stored using an encoding type of <code>PLAIN</code>. Defaults to 1024 * 1024 bytes (1 MiB), the maximum size of
* a dictionary page before it reverts to <code>PLAIN</code> encoding. For <code>PARQUET</code> format only.
*
*
* @return The maximum size of an encoded dictionary page of a column. If the dictionary page exceeds this, this
* column is stored using an encoding type of <code>PLAIN</code>. Defaults to 1024 * 1024 bytes (1 MiB). For
* <code>PARQUET</code> format only.
*/
public Integer getDictPageSizeLimit() {
return this.dictPageSizeLimit;
}
/**
*
* The maximum size of an encoded dictionary page of a column. If the dictionary page exceeds this, this column is
* stored using an encoding type of <code>PLAIN</code>. Defaults to 1024 * 1024 bytes (1 MiB), the maximum size of
* a dictionary page before it reverts to <code>PLAIN</code> encoding. For <code>PARQUET</code> format only.
*
*
* @param dictPageSizeLimit
* The maximum size of an encoded dictionary page of a column. If the dictionary page exceeds this, this column is
* stored using an encoding type of <code>PLAIN</code>. Defaults to 1024 * 1024 bytes (1 MiB). For
* <code>PARQUET</code> format only.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public S3Settings withDictPageSizeLimit(Integer dictPageSizeLimit) {
setDictPageSizeLimit(dictPageSizeLimit);
return this;
}
/**
*
* The number of rows in a row group. A smaller row group size provides faster reads, but as the number of row
* groups grows, writes become slower. Defaults to 10,000 (ten thousand) rows. For <code>PARQUET</code> format
* only.
*
* If you choose a value larger than the maximum, <code>RowGroupLength</code> is set to the max row group length
* in bytes (64 * 1024 * 1024).
*
*
* @param rowGroupLength
* The number of rows in a row group. A smaller row group size provides faster reads, but as the number of row
* groups grows, writes become slower. Defaults to 10,000 (ten thousand) rows. For <code>PARQUET</code> format
* only. If you choose a value larger than the maximum, <code>RowGroupLength</code> is set to the max row group
* length in bytes (64 * 1024 * 1024).
*/
public void setRowGroupLength(Integer rowGroupLength) {
this.rowGroupLength = rowGroupLength;
}
/**
*
* The number of rows in a row group. A smaller row group size provides faster reads, but as the number of row
* groups grows, writes become slower. Defaults to 10,000 (ten thousand) rows. For <code>PARQUET</code> format
* only.
*
* If you choose a value larger than the maximum, <code>RowGroupLength</code> is set to the max row group length
* in bytes (64 * 1024 * 1024).
*
*
* @return The number of rows in a row group. A smaller row group size provides faster reads, but as the number of
* row groups grows, writes become slower. Defaults to 10,000 (ten thousand) rows. For <code>PARQUET</code> format
* only. If you choose a value larger than the maximum, <code>RowGroupLength</code> is set to the max row group
* length in bytes (64 * 1024 * 1024).
*/
public Integer getRowGroupLength() {
return this.rowGroupLength;
}
/**
*
* The number of rows in a row group. A smaller row group size provides faster reads, but as the number of row
* groups grows, writes become slower. Defaults to 10,000 (ten thousand) rows. For <code>PARQUET</code> format
* only.
*
* If you choose a value larger than the maximum, <code>RowGroupLength</code> is set to the max row group length
* in bytes (64 * 1024 * 1024).
*
*
* @param rowGroupLength
* The number of rows in a row group. A smaller row group size provides faster reads, but as the number of row
* groups grows, writes become slower. Defaults to 10,000 (ten thousand) rows. For <code>PARQUET</code> format
* only. If you choose a value larger than the maximum, <code>RowGroupLength</code> is set to the max row group
* length in bytes (64 * 1024 * 1024).
* @return Returns a reference to this object so that method calls can be chained together.
*/
public S3Settings withRowGroupLength(Integer rowGroupLength) {
setRowGroupLength(rowGroupLength);
return this;
}
/**
*
* The size of one data page in bytes. Defaults to 1024 * 1024 bytes (1 MiB). For <code>PARQUET</code> format only.
*
*
* @param dataPageSize
* The size of one data page in bytes. Defaults to 1024 * 1024 bytes (1 MiB). For <code>PARQUET</code> format only.
*/
public void setDataPageSize(Integer dataPageSize) {
this.dataPageSize = dataPageSize;
}
/**
*
* The size of one data page in bytes. Defaults to 1024 * 1024 bytes (1 MiB). For <code>PARQUET</code> format only.
*
*
* @return The size of one data page in bytes. Defaults to 1024 * 1024 bytes (1 MiB). For <code>PARQUET</code>
* format only.
*/
public Integer getDataPageSize() {
return this.dataPageSize;
}
/**
*
* The size of one data page in bytes. Defaults to 1024 * 1024 bytes (1 MiB). For <code>PARQUET</code> format only.
*
*
* @param dataPageSize
* The size of one data page in bytes. Defaults to 1024 * 1024 bytes (1 MiB). For <code>PARQUET</code> format only.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public S3Settings withDataPageSize(Integer dataPageSize) {
setDataPageSize(dataPageSize);
return this;
}
/**
*
* The version of Apache Parquet format you want to use: <code>PARQUET_1_0</code> (default) or
* <code>PARQUET_2_0</code>.
*
*
* @param parquetVersion
* The version of Apache Parquet format you want to use: <code>PARQUET_1_0</code> (default) or
* <code>PARQUET_2_0</code>.
* @see ParquetVersionValue
*/
public void setParquetVersion(String parquetVersion) {
this.parquetVersion = parquetVersion;
}
/**
*
* The version of Apache Parquet format you want to use: <code>PARQUET_1_0</code> (default) or
* <code>PARQUET_2_0</code>.
*
*
* @return The version of Apache Parquet format you want to use: <code>PARQUET_1_0</code> (default) or
* <code>PARQUET_2_0</code>.
* @see ParquetVersionValue
*/
public String getParquetVersion() {
return this.parquetVersion;
}
/**
*
* The version of Apache Parquet format you want to use: <code>PARQUET_1_0</code> (default) or
* <code>PARQUET_2_0</code>.
*
*
* @param parquetVersion
* The version of Apache Parquet format you want to use: <code>PARQUET_1_0</code> (default) or
* <code>PARQUET_2_0</code>.
* @return Returns a reference to this object so that method calls can be chained together.
* @see ParquetVersionValue
*/
public S3Settings withParquetVersion(String parquetVersion) {
setParquetVersion(parquetVersion);
return this;
}
/**
*
* The version of Apache Parquet format you want to use: <code>PARQUET_1_0</code> (default) or
* <code>PARQUET_2_0</code>.
*
*
* @param parquetVersion
* The version of Apache Parquet format you want to use: <code>PARQUET_1_0</code> (default) or
* <code>PARQUET_2_0</code>.
* @see ParquetVersionValue
*/
public void setParquetVersion(ParquetVersionValue parquetVersion) {
withParquetVersion(parquetVersion);
}
/**
*
* The version of Apache Parquet format you want to use: <code>PARQUET_1_0</code> (default) or
* <code>PARQUET_2_0</code>.
*
*
* @param parquetVersion
* The version of Apache Parquet format you want to use: <code>PARQUET_1_0</code> (default) or
* <code>PARQUET_2_0</code>.
* @return Returns a reference to this object so that method calls can be chained together.
* @see ParquetVersionValue
*/
public S3Settings withParquetVersion(ParquetVersionValue parquetVersion) {
this.parquetVersion = parquetVersion.toString();
return this;
}
/**
*
* Enables statistics for Parquet pages and rowGroups. Choose <code>TRUE</code> to enable statistics, choose
* <code>FALSE</code> to disable. Statistics include <code>NULL</code>, <code>DISTINCT</code>, <code>MAX</code>,
* and <code>MIN</code> values. Defaults to <code>TRUE</code>. For <code>PARQUET</code> format only.
*
*
* @param enableStatistics
* Enables statistics for Parquet pages and rowGroups. Choose <code>TRUE</code> to enable statistics, choose
* <code>FALSE</code> to disable. Statistics include <code>NULL</code>, <code>DISTINCT</code>, <code>MAX</code>,
* and <code>MIN</code> values. Defaults to <code>TRUE</code>. For <code>PARQUET</code> format only.
*/
public void setEnableStatistics(Boolean enableStatistics) {
this.enableStatistics = enableStatistics;
}
/**
*
* Enables statistics for Parquet pages and rowGroups. Choose <code>TRUE</code> to enable statistics, choose
* <code>FALSE</code> to disable. Statistics include <code>NULL</code>, <code>DISTINCT</code>, <code>MAX</code>,
* and <code>MIN</code> values. Defaults to <code>TRUE</code>. For <code>PARQUET</code> format only.
*
*
* @return Enables statistics for Parquet pages and rowGroups. Choose <code>TRUE</code> to enable statistics,
* choose <code>FALSE</code> to disable. Statistics include <code>NULL</code>, <code>DISTINCT</code>,
* <code>MAX</code>, and <code>MIN</code> values. Defaults to <code>TRUE</code>. For <code>PARQUET</code> format
* only.
*/
public Boolean getEnableStatistics() {
return this.enableStatistics;
}
/**
*
* Enables statistics for Parquet pages and rowGroups. Choose <code>TRUE</code> to enable statistics, choose
* <code>FALSE</code> to disable. Statistics include <code>NULL</code>, <code>DISTINCT</code>, <code>MAX</code>,
* and <code>MIN</code> values. Defaults to <code>TRUE</code>. For <code>PARQUET</code> format only.
*
*
* @param enableStatistics
* Enables statistics for Parquet pages and rowGroups. Choose <code>TRUE</code> to enable statistics, choose
* <code>FALSE</code> to disable. Statistics include <code>NULL</code>, <code>DISTINCT</code>, <code>MAX</code>,
* and <code>MIN</code> values. Defaults to <code>TRUE</code>. For <code>PARQUET</code> format only.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public S3Settings withEnableStatistics(Boolean enableStatistics) {
setEnableStatistics(enableStatistics);
return this;
}
/**
*
* Enables statistics for Parquet pages and rowGroups. Choose <code>TRUE</code> to enable statistics, choose
* <code>FALSE</code> to disable. Statistics include <code>NULL</code>, <code>DISTINCT</code>, <code>MAX</code>,
* and <code>MIN</code> values. Defaults to <code>TRUE</code>. For <code>PARQUET</code> format only.
*
*
* @return Enables statistics for Parquet pages and rowGroups. Choose <code>TRUE</code> to enable statistics,
* choose <code>FALSE</code> to disable. Statistics include <code>NULL</code>, <code>DISTINCT</code>,
* <code>MAX</code>, and <code>MIN</code> values. Defaults to <code>TRUE</code>. For <code>PARQUET</code> format
* only.
*/
public Boolean isEnableStatistics() {
return this.enableStatistics;
}
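// Illustrative sketch: a Parquet-format target that spells out the page and row-group knobs documented
// above. The numeric values shown are the documented defaults, written explicitly for clarity.
//
//     S3Settings parquetSettings = new S3Settings()
//             .withDataFormat(DataFormatValue.PARQUET)
//             .withEncodingType(EncodingTypeValue.RLE_DICTIONARY)
//             .withParquetVersion(ParquetVersionValue.PARQUET_1_0)
//             .withDictPageSizeLimit(1024 * 1024) // 1 MiB dictionary page limit (default)
//             .withDataPageSize(1024 * 1024)      // 1 MiB data page (default)
//             .withRowGroupLength(10000)          // 10,000 rows per row group (default)
//             .withEnableStatistics(true);        // page/rowGroup statistics on (default)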
/**
*
* Option to write only <code>INSERT</code> operations to the comma-separated value (CSV) output files. By default,
* the first field in a CSV record contains the letter <code>I</code> (insert), <code>U</code> (update), or
* <code>D</code> (delete) to indicate whether the row was inserted, updated, or deleted at the source database. If
* <code>cdcInsertsOnly</code> is set to true, then only <code>INSERT</code>s are recorded in the CSV file, without
* the <code>I</code> annotation on each line. Valid values are <code>TRUE</code> and <code>FALSE</code>.
*
*
* @param cdcInsertsOnly
* Option to write only <code>INSERT</code> operations to the CSV output files. If <code>cdcInsertsOnly</code> is
* set to true, only <code>INSERT</code>s are recorded, without the leading <code>I</code> annotation. Valid values
* are <code>TRUE</code> and <code>FALSE</code>.
*/
public void setCdcInsertsOnly(Boolean cdcInsertsOnly) {
this.cdcInsertsOnly = cdcInsertsOnly;
}
/**
*
* Option to write only <code>INSERT</code> operations to the comma-separated value (CSV) output files. By default,
* the first field in a CSV record contains the letter <code>I</code> (insert), <code>U</code> (update), or
* <code>D</code> (delete) to indicate whether the row was inserted, updated, or deleted at the source database. If
* <code>cdcInsertsOnly</code> is set to true, then only <code>INSERT</code>s are recorded in the CSV file, without
* the <code>I</code> annotation on each line. Valid values are <code>TRUE</code> and <code>FALSE</code>.
*
*
* @return Option to write only <code>INSERT</code> operations to the CSV output files. If
* <code>cdcInsertsOnly</code> is set to true, only <code>INSERT</code>s are recorded, without the leading
* <code>I</code> annotation. Valid values are <code>TRUE</code> and <code>FALSE</code>.
*/
public Boolean getCdcInsertsOnly() {
return this.cdcInsertsOnly;
}
/**
*
* Option to write only <code>INSERT</code> operations to the comma-separated value (CSV) output files. By default,
* the first field in a CSV record contains the letter <code>I</code> (insert), <code>U</code> (update), or
* <code>D</code> (delete) to indicate whether the row was inserted, updated, or deleted at the source database. If
* <code>cdcInsertsOnly</code> is set to true, then only <code>INSERT</code>s are recorded in the CSV file, without
* the <code>I</code> annotation on each line. Valid values are <code>TRUE</code> and <code>FALSE</code>.
*
*
* @param cdcInsertsOnly
* Option to write only <code>INSERT</code> operations to the CSV output files. If <code>cdcInsertsOnly</code> is
* set to true, only <code>INSERT</code>s are recorded, without the leading <code>I</code> annotation. Valid values
* are <code>TRUE</code> and <code>FALSE</code>.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public S3Settings withCdcInsertsOnly(Boolean cdcInsertsOnly) {
setCdcInsertsOnly(cdcInsertsOnly);
return this;
}
/**
*
* Option to write only <code>INSERT</code> operations to the comma-separated value (CSV) output files. By default,
* the first field in a CSV record contains the letter <code>I</code> (insert), <code>U</code> (update), or
* <code>D</code> (delete) to indicate whether the row was inserted, updated, or deleted at the source database. If
* <code>cdcInsertsOnly</code> is set to true, then only <code>INSERT</code>s are recorded in the CSV file, without
* the <code>I</code> annotation on each line. Valid values are <code>TRUE</code> and <code>FALSE</code>.
*
*
* @return Option to write only <code>INSERT</code> operations to the CSV output files. If
* <code>cdcInsertsOnly</code> is set to true, only <code>INSERT</code>s are recorded, without the leading
* <code>I</code> annotation. Valid values are <code>TRUE</code> and <code>FALSE</code>.
*/
public Boolean isCdcInsertsOnly() {
return this.cdcInsertsOnly;
}
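// Illustrative sketch: a CSV target that records only INSERT operations, dropping the leading I/U/D
// operation indicator described above.
//
//     S3Settings insertsOnly = new S3Settings()
//             .withDataFormat(DataFormatValue.CSV)
//             .withCdcInsertsOnly(true);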
/**
* Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be
* redacted from this string using a placeholder value.
*
* @return A string representation of this object.
*
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("{");
if (getServiceAccessRoleArn() != null)
sb.append("ServiceAccessRoleArn: ").append(getServiceAccessRoleArn()).append(",");
if (getExternalTableDefinition() != null)
sb.append("ExternalTableDefinition: ").append(getExternalTableDefinition()).append(",");
if (getCsvRowDelimiter() != null)
sb.append("CsvRowDelimiter: ").append(getCsvRowDelimiter()).append(",");
if (getCsvDelimiter() != null)
sb.append("CsvDelimiter: ").append(getCsvDelimiter()).append(",");
if (getBucketFolder() != null)
sb.append("BucketFolder: ").append(getBucketFolder()).append(",");
if (getBucketName() != null)
sb.append("BucketName: ").append(getBucketName()).append(",");
if (getCompressionType() != null)
sb.append("CompressionType: ").append(getCompressionType()).append(",");
if (getEncryptionMode() != null)
sb.append("EncryptionMode: ").append(getEncryptionMode()).append(",");
if (getServerSideEncryptionKmsKeyId() != null)
sb.append("ServerSideEncryptionKmsKeyId: ").append(getServerSideEncryptionKmsKeyId()).append(",");
if (getDataFormat() != null)
sb.append("DataFormat: ").append(getDataFormat()).append(",");
if (getEncodingType() != null)
sb.append("EncodingType: ").append(getEncodingType()).append(",");
if (getDictPageSizeLimit() != null)
sb.append("DictPageSizeLimit: ").append(getDictPageSizeLimit()).append(",");
if (getRowGroupLength() != null)
sb.append("RowGroupLength: ").append(getRowGroupLength()).append(",");
if (getDataPageSize() != null)
sb.append("DataPageSize: ").append(getDataPageSize()).append(",");
if (getParquetVersion() != null)
sb.append("ParquetVersion: ").append(getParquetVersion()).append(",");
if (getEnableStatistics() != null)
sb.append("EnableStatistics: ").append(getEnableStatistics()).append(",");
if (getCdcInsertsOnly() != null)
sb.append("CdcInsertsOnly: ").append(getCdcInsertsOnly());
sb.append("}");
return sb.toString();
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (obj instanceof S3Settings == false)
return false;
S3Settings other = (S3Settings) obj;
if (other.getServiceAccessRoleArn() == null ^ this.getServiceAccessRoleArn() == null)
return false;
if (other.getServiceAccessRoleArn() != null && other.getServiceAccessRoleArn().equals(this.getServiceAccessRoleArn()) == false)
return false;
if (other.getExternalTableDefinition() == null ^ this.getExternalTableDefinition() == null)
return false;
if (other.getExternalTableDefinition() != null && other.getExternalTableDefinition().equals(this.getExternalTableDefinition()) == false)
return false;
if (other.getCsvRowDelimiter() == null ^ this.getCsvRowDelimiter() == null)
return false;
if (other.getCsvRowDelimiter() != null && other.getCsvRowDelimiter().equals(this.getCsvRowDelimiter()) == false)
return false;
if (other.getCsvDelimiter() == null ^ this.getCsvDelimiter() == null)
return false;
if (other.getCsvDelimiter() != null && other.getCsvDelimiter().equals(this.getCsvDelimiter()) == false)
return false;
if (other.getBucketFolder() == null ^ this.getBucketFolder() == null)
return false;
if (other.getBucketFolder() != null && other.getBucketFolder().equals(this.getBucketFolder()) == false)
return false;
if (other.getBucketName() == null ^ this.getBucketName() == null)
return false;
if (other.getBucketName() != null && other.getBucketName().equals(this.getBucketName()) == false)
return false;
if (other.getCompressionType() == null ^ this.getCompressionType() == null)
return false;
if (other.getCompressionType() != null && other.getCompressionType().equals(this.getCompressionType()) == false)
return false;
if (other.getEncryptionMode() == null ^ this.getEncryptionMode() == null)
return false;
if (other.getEncryptionMode() != null && other.getEncryptionMode().equals(this.getEncryptionMode()) == false)
return false;
if (other.getServerSideEncryptionKmsKeyId() == null ^ this.getServerSideEncryptionKmsKeyId() == null)
return false;
if (other.getServerSideEncryptionKmsKeyId() != null && other.getServerSideEncryptionKmsKeyId().equals(this.getServerSideEncryptionKmsKeyId()) == false)
return false;
if (other.getDataFormat() == null ^ this.getDataFormat() == null)
return false;
if (other.getDataFormat() != null && other.getDataFormat().equals(this.getDataFormat()) == false)
return false;
if (other.getEncodingType() == null ^ this.getEncodingType() == null)
return false;
if (other.getEncodingType() != null && other.getEncodingType().equals(this.getEncodingType()) == false)
return false;
if (other.getDictPageSizeLimit() == null ^ this.getDictPageSizeLimit() == null)
return false;
if (other.getDictPageSizeLimit() != null && other.getDictPageSizeLimit().equals(this.getDictPageSizeLimit()) == false)
return false;
if (other.getRowGroupLength() == null ^ this.getRowGroupLength() == null)
return false;
if (other.getRowGroupLength() != null && other.getRowGroupLength().equals(this.getRowGroupLength()) == false)
return false;
if (other.getDataPageSize() == null ^ this.getDataPageSize() == null)
return false;
if (other.getDataPageSize() != null && other.getDataPageSize().equals(this.getDataPageSize()) == false)
return false;
if (other.getParquetVersion() == null ^ this.getParquetVersion() == null)
return false;
if (other.getParquetVersion() != null && other.getParquetVersion().equals(this.getParquetVersion()) == false)
return false;
if (other.getEnableStatistics() == null ^ this.getEnableStatistics() == null)
return false;
if (other.getEnableStatistics() != null && other.getEnableStatistics().equals(this.getEnableStatistics()) == false)
return false;
if (other.getCdcInsertsOnly() == null ^ this.getCdcInsertsOnly() == null)
return false;
if (other.getCdcInsertsOnly() != null && other.getCdcInsertsOnly().equals(this.getCdcInsertsOnly()) == false)
return false;
return true;
}
@Override
public int hashCode() {
final int prime = 31;
int hashCode = 1;
hashCode = prime * hashCode + ((getServiceAccessRoleArn() == null) ? 0 : getServiceAccessRoleArn().hashCode());
hashCode = prime * hashCode + ((getExternalTableDefinition() == null) ? 0 : getExternalTableDefinition().hashCode());
hashCode = prime * hashCode + ((getCsvRowDelimiter() == null) ? 0 : getCsvRowDelimiter().hashCode());
hashCode = prime * hashCode + ((getCsvDelimiter() == null) ? 0 : getCsvDelimiter().hashCode());
hashCode = prime * hashCode + ((getBucketFolder() == null) ? 0 : getBucketFolder().hashCode());
hashCode = prime * hashCode + ((getBucketName() == null) ? 0 : getBucketName().hashCode());
hashCode = prime * hashCode + ((getCompressionType() == null) ? 0 : getCompressionType().hashCode());
hashCode = prime * hashCode + ((getEncryptionMode() == null) ? 0 : getEncryptionMode().hashCode());
hashCode = prime * hashCode + ((getServerSideEncryptionKmsKeyId() == null) ? 0 : getServerSideEncryptionKmsKeyId().hashCode());
hashCode = prime * hashCode + ((getDataFormat() == null) ? 0 : getDataFormat().hashCode());
hashCode = prime * hashCode + ((getEncodingType() == null) ? 0 : getEncodingType().hashCode());
hashCode = prime * hashCode + ((getDictPageSizeLimit() == null) ? 0 : getDictPageSizeLimit().hashCode());
hashCode = prime * hashCode + ((getRowGroupLength() == null) ? 0 : getRowGroupLength().hashCode());
hashCode = prime * hashCode + ((getDataPageSize() == null) ? 0 : getDataPageSize().hashCode());
hashCode = prime * hashCode + ((getParquetVersion() == null) ? 0 : getParquetVersion().hashCode());
hashCode = prime * hashCode + ((getEnableStatistics() == null) ? 0 : getEnableStatistics().hashCode());
hashCode = prime * hashCode + ((getCdcInsertsOnly() == null) ? 0 : getCdcInsertsOnly().hashCode());
return hashCode;
}
@Override
public S3Settings clone() {
try {
return (S3Settings) super.clone();
} catch (CloneNotSupportedException e) {
throw new IllegalStateException("Got a CloneNotSupportedException from Object.clone() " + "even though we're Cloneable!", e);
}
}
@com.amazonaws.annotation.SdkInternalApi
@Override
public void marshall(ProtocolMarshaller protocolMarshaller) {
com.amazonaws.services.databasemigrationservice.model.transform.S3SettingsMarshaller.getInstance().marshall(this, protocolMarshaller);
}
}