/*
* Copyright 2017-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.databasemigrationservice.model;
import java.io.Serializable;
import javax.annotation.Generated;
import com.amazonaws.protocol.StructuredPojo;
import com.amazonaws.protocol.ProtocolMarshaller;
/**
*
* Provides information that describes an Apache Kafka endpoint. This information includes the output format of records
* applied to the endpoint and details of transaction and control table data.
*
* @see AWS API Documentation
*/
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class KafkaSettings implements Serializable, Cloneable, StructuredPojo {
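/*
 * Usage sketch (illustrative, not part of the generated model): building a KafkaSettings
 * instance with the fluent "with" methods and attaching it to an endpoint request. The
 * CreateEndpointRequest class is assumed to come from this same SDK module; identifiers such
 * as "kafka-target" and the topic name are placeholders.
 *
 *   KafkaSettings kafkaSettings = new KafkaSettings()
 *           .withBroker("ec2-12-345-678-901.compute-1.amazonaws.com:2345")
 *           .withTopic("dms-sample-topic")
 *           .withMessageFormat("json")
 *           .withIncludeTransactionDetails(true)
 *           .withIncludePartitionValue(true);
 *
 *   CreateEndpointRequest request = new CreateEndpointRequest()
 *           .withEndpointIdentifier("kafka-target")
 *           .withEndpointType("target")
 *           .withEngineName("kafka")
 *           .withKafkaSettings(kafkaSettings);
 */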
/**
*
* A comma-separated list of one or more broker locations in your Kafka cluster that host your Kafka instance.
* Specify each broker location in the form broker-hostname-or-ip:port. For example,
* "ec2-12-345-678-901.compute-1.amazonaws.com:2345". For more information and examples of specifying a
* list of broker locations, see Using Apache Kafka as a target for
* Database Migration Service in the Database Migration Service User Guide.
*
*/
private String broker;
/**
*
* The topic to which you migrate the data. If you don't specify a topic, DMS specifies
* "kafka-default-topic"
as the migration topic.
*
*/
private String topic;
/**
*
* The output format for the records created on the endpoint. The message format is JSON (default) or
* JSON_UNFORMATTED (a single line with no tab).
*
*/
private String messageFormat;
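/*
 * Illustrative sketch: requesting single-line JSON output. The string values shown ("json",
 * "json-unformatted") are the service-level identifiers for the two formats described above;
 * the MessageFormatValue overload of withMessageFormat accepts the corresponding enum
 * constants instead of a raw string.
 *
 *   KafkaSettings formatted = new KafkaSettings().withMessageFormat("json");
 *   KafkaSettings unformatted = new KafkaSettings().withMessageFormat("json-unformatted");
 */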
/**
*
* Provides detailed transaction information from the source database. This information includes a commit timestamp,
* a log position, and values for transaction_id, previous transaction_id, and
* transaction_record_id (the record offset within a transaction). The default is false.
*
*/
private Boolean includeTransactionDetails;
/**
*
* Shows the partition value within the Kafka message output unless the partition type is
* schema-table-type. The default is false.
*
*/
private Boolean includePartitionValue;
/**
*
* Prefixes schema and table names to partition values, when the partition type is primary-key-type.
* Doing this increases data distribution among Kafka partitions. For example, suppose that a SysBench schema has
* thousands of tables and each table has only a limited range for a primary key. In this case, the same primary key
* is sent from thousands of tables to the same partition, which causes throttling. The default is false.
*
*/
private Boolean partitionIncludeSchemaTable;
/**
*
* Includes any data definition language (DDL) operations that change the table in the control data, such as
* rename-table, drop-table, add-column, drop-column, and
* rename-column. The default is false.
*
*/
private Boolean includeTableAlterOperations;
/**
*
* Shows detailed control information for table definition, column definition, and table and column changes in the
* Kafka message output. The default is false.
*
*/
private Boolean includeControlDetails;
/**
*
* The maximum size in bytes for records created on the endpoint. The default is 1,000,000.
*
*/
private Integer messageMaxBytes;
/**
*
* Include NULL and empty columns for records migrated to the endpoint. The default is false.
*
*/
private Boolean includeNullAndEmpty;
/**
*
* Set secure connection to a Kafka target endpoint using Transport Layer Security (TLS). Options include
* ssl-encryption, ssl-authentication, and sasl-ssl. sasl-ssl
* requires SaslUsername and SaslPassword.
*
*/
private String securityProtocol;
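/*
 * Illustrative sketch: enabling SASL-SSL. As noted above, sasl-ssl requires both SaslUsername
 * and SaslPassword; the credential values here are placeholders. The lowercase string
 * "sasl-ssl" mirrors the option names listed in the setting's documentation, and the
 * KafkaSecurityProtocol overload of withSecurityProtocol can be used instead of a raw string.
 *
 *   KafkaSettings saslSettings = new KafkaSettings()
 *           .withSecurityProtocol("sasl-ssl")
 *           .withSaslUsername("dms-client")
 *           .withSaslPassword("example-password");
 */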
/**
*
* The Amazon Resource Name (ARN) of the client certificate used to securely connect to a Kafka target endpoint.
*
*/
private String sslClientCertificateArn;
/**
*
* The Amazon Resource Name (ARN) for the client private key used to securely connect to a Kafka target endpoint.
*
*/
private String sslClientKeyArn;
/**
*
* The password for the client private key used to securely connect to a Kafka target endpoint.
*
*/
private String sslClientKeyPassword;
/**
*
* The Amazon Resource Name (ARN) for the private certificate authority (CA) cert that DMS uses to securely connect
* to your Kafka target endpoint.
*
*/
private String sslCaCertificateArn;
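/*
 * Illustrative sketch: client-certificate (ssl-authentication) configuration. All ARNs and the
 * key password below are placeholders for certificates previously imported into DMS; the exact
 * ARN format is an assumption for illustration only.
 *
 *   KafkaSettings tlsSettings = new KafkaSettings()
 *           .withSecurityProtocol("ssl-authentication")
 *           .withSslClientCertificateArn("arn:aws:dms:us-east-1:123456789012:cert:client-cert")
 *           .withSslClientKeyArn("arn:aws:dms:us-east-1:123456789012:cert:client-key")
 *           .withSslClientKeyPassword("example-key-password")
 *           .withSslCaCertificateArn("arn:aws:dms:us-east-1:123456789012:cert:private-ca");
 */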
/**
*
* The secure user name you created when you first set up your MSK cluster to validate a client identity and make an
* encrypted connection between server and client using SASL-SSL authentication.
*
*/
private String saslUsername;
/**
*
* The secure password you created when you first set up your MSK cluster to validate a client identity and make an
* encrypted connection between server and client using SASL-SSL authentication.
*
*/
private String saslPassword;
/**
*
* Set this optional parameter to true to avoid adding a '0x' prefix to raw data in hexadecimal format.
* For example, by default, DMS adds a '0x' prefix to the LOB column type in hexadecimal format moving from an
* Oracle source to a Kafka target. Use the NoHexPrefix endpoint setting to enable migration of RAW
* data type columns without adding the '0x' prefix.
*
*/
private Boolean noHexPrefix;
/**
*
* A comma-separated list of one or more broker locations in your Kafka cluster that host your Kafka instance.
* Specify each broker location in the form broker-hostname-or-ip:port. For example,
* "ec2-12-345-678-901.compute-1.amazonaws.com:2345". For more information and examples of specifying a
* list of broker locations, see Using Apache Kafka as a target for
* Database Migration Service in the Database Migration Service User Guide.
*
*
* @param broker
* A comma-separated list of one or more broker locations in your Kafka cluster that host your Kafka
* instance. Specify each broker location in the form broker-hostname-or-ip:port. For example,
* "ec2-12-345-678-901.compute-1.amazonaws.com:2345". For more information and
* examples of specifying a list of broker locations, see Using Apache Kafka as a
* target for Database Migration Service in the Database Migration Service User Guide.
*/
public void setBroker(String broker) {
this.broker = broker;
}
/**
*
* A comma-separated list of one or more broker locations in your Kafka cluster that host your Kafka instance.
* Specify each broker location in the form broker-hostname-or-ip:port. For example,
* "ec2-12-345-678-901.compute-1.amazonaws.com:2345". For more information and examples of specifying a
* list of broker locations, see Using Apache Kafka as a target for
* Database Migration Service in the Database Migration Service User Guide.
*
*
* @return A comma-separated list of one or more broker locations in your Kafka cluster that host your Kafka
* instance. Specify each broker location in the form
* broker-hostname-or-ip:port. For example,
* "ec2-12-345-678-901.compute-1.amazonaws.com:2345". For more information and examples of
* specifying a list of broker locations, see Using Apache Kafka as a
* target for Database Migration Service in the Database Migration Service User Guide.
*/
public String getBroker() {
return this.broker;
}
/**
*
* A comma-separated list of one or more broker locations in your Kafka cluster that host your Kafka instance.
* Specify each broker location in the form broker-hostname-or-ip:port. For example,
* "ec2-12-345-678-901.compute-1.amazonaws.com:2345". For more information and examples of specifying a
* list of broker locations, see Using Apache Kafka as a target for
* Database Migration Service in the Database Migration Service User Guide.
*
*
* @param broker
* A comma-separated list of one or more broker locations in your Kafka cluster that host your Kafka
* instance. Specify each broker location in the form broker-hostname-or-ip:port. For example,
* "ec2-12-345-678-901.compute-1.amazonaws.com:2345". For more information and
* examples of specifying a list of broker locations, see Using Apache Kafka as a
* target for Database Migration Service in the Database Migration Service User Guide.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public KafkaSettings withBroker(String broker) {
setBroker(broker);
return this;
}
/**
*
* The topic to which you migrate the data. If you don't specify a topic, DMS specifies
* "kafka-default-topic"
as the migration topic.
*
*
* @param topic
* The topic to which you migrate the data. If you don't specify a topic, DMS specifies
* "kafka-default-topic"
as the migration topic.
*/
public void setTopic(String topic) {
this.topic = topic;
}
/**
*
* The topic to which you migrate the data. If you don't specify a topic, DMS specifies
* "kafka-default-topic"
as the migration topic.
*
*
* @return The topic to which you migrate the data. If you don't specify a topic, DMS specifies
* "kafka-default-topic"
as the migration topic.
*/
public String getTopic() {
return this.topic;
}
/**
*
* The topic to which you migrate the data. If you don't specify a topic, DMS specifies
* "kafka-default-topic"
as the migration topic.
*
*
* @param topic
* The topic to which you migrate the data. If you don't specify a topic, DMS specifies
* "kafka-default-topic"
as the migration topic.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public KafkaSettings withTopic(String topic) {
setTopic(topic);
return this;
}
/**
*
* The output format for the records created on the endpoint. The message format is JSON (default) or
* JSON_UNFORMATTED (a single line with no tab).
*
*
* @param messageFormat
* The output format for the records created on the endpoint. The message format is JSON
* (default) or JSON_UNFORMATTED (a single line with no tab).
* @see MessageFormatValue
*/
public void setMessageFormat(String messageFormat) {
this.messageFormat = messageFormat;
}
/**
*
* The output format for the records created on the endpoint. The message format is JSON (default) or
* JSON_UNFORMATTED (a single line with no tab).
*
*
* @return The output format for the records created on the endpoint. The message format is JSON
* (default) or JSON_UNFORMATTED (a single line with no tab).
* @see MessageFormatValue
*/
public String getMessageFormat() {
return this.messageFormat;
}
/**
*
* The output format for the records created on the endpoint. The message format is JSON (default) or
* JSON_UNFORMATTED (a single line with no tab).
*
*
* @param messageFormat
* The output format for the records created on the endpoint. The message format is JSON
* (default) or JSON_UNFORMATTED (a single line with no tab).
* @return Returns a reference to this object so that method calls can be chained together.
* @see MessageFormatValue
*/
public KafkaSettings withMessageFormat(String messageFormat) {
setMessageFormat(messageFormat);
return this;
}
/**
*
* The output format for the records created on the endpoint. The message format is JSON (default) or
* JSON_UNFORMATTED (a single line with no tab).
*
*
* @param messageFormat
* The output format for the records created on the endpoint. The message format is JSON
* (default) or JSON_UNFORMATTED (a single line with no tab).
* @return Returns a reference to this object so that method calls can be chained together.
* @see MessageFormatValue
*/
public KafkaSettings withMessageFormat(MessageFormatValue messageFormat) {
this.messageFormat = messageFormat.toString();
return this;
}
/**
*
* Provides detailed transaction information from the source database. This information includes a commit timestamp,
* a log position, and values for transaction_id, previous transaction_id, and
* transaction_record_id (the record offset within a transaction). The default is false.
*
*
* @param includeTransactionDetails
* Provides detailed transaction information from the source database. This information includes a commit
* timestamp, a log position, and values for transaction_id, previous
* transaction_id, and transaction_record_id (the record offset within a
* transaction). The default is false.
*/
public void setIncludeTransactionDetails(Boolean includeTransactionDetails) {
this.includeTransactionDetails = includeTransactionDetails;
}
/**
*
* Provides detailed transaction information from the source database. This information includes a commit timestamp,
* a log position, and values for transaction_id, previous transaction_id, and
* transaction_record_id (the record offset within a transaction). The default is false.
*
*
* @return Provides detailed transaction information from the source database. This information includes a commit
* timestamp, a log position, and values for transaction_id, previous
* transaction_id, and transaction_record_id (the record offset within a
* transaction). The default is false.
*/
public Boolean getIncludeTransactionDetails() {
return this.includeTransactionDetails;
}
/**
*
* Provides detailed transaction information from the source database. This information includes a commit timestamp,
* a log position, and values for transaction_id, previous transaction_id, and
* transaction_record_id (the record offset within a transaction). The default is false.
*
*
* @param includeTransactionDetails
* Provides detailed transaction information from the source database. This information includes a commit
* timestamp, a log position, and values for transaction_id, previous
* transaction_id, and transaction_record_id (the record offset within a
* transaction). The default is false.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public KafkaSettings withIncludeTransactionDetails(Boolean includeTransactionDetails) {
setIncludeTransactionDetails(includeTransactionDetails);
return this;
}
/**
*
* Provides detailed transaction information from the source database. This information includes a commit timestamp,
* a log position, and values for transaction_id, previous transaction_id, and
* transaction_record_id (the record offset within a transaction). The default is false.
*
*
* @return Provides detailed transaction information from the source database. This information includes a commit
* timestamp, a log position, and values for transaction_id, previous
* transaction_id, and transaction_record_id (the record offset within a
* transaction). The default is false.
*/
public Boolean isIncludeTransactionDetails() {
return this.includeTransactionDetails;
}
/**
*
* Shows the partition value within the Kafka message output unless the partition type is
* schema-table-type. The default is false.
*
*
* @param includePartitionValue
* Shows the partition value within the Kafka message output unless the partition type is
* schema-table-type. The default is false.
*/
public void setIncludePartitionValue(Boolean includePartitionValue) {
this.includePartitionValue = includePartitionValue;
}
/**
*
* Shows the partition value within the Kafka message output unless the partition type is
* schema-table-type. The default is false.
*
*
* @return Shows the partition value within the Kafka message output unless the partition type is
* schema-table-type. The default is false.
*/
public Boolean getIncludePartitionValue() {
return this.includePartitionValue;
}
/**
*
* Shows the partition value within the Kafka message output unless the partition type is
* schema-table-type. The default is false.
*
*
* @param includePartitionValue
* Shows the partition value within the Kafka message output unless the partition type is
* schema-table-type. The default is false.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public KafkaSettings withIncludePartitionValue(Boolean includePartitionValue) {
setIncludePartitionValue(includePartitionValue);
return this;
}
/**
*
* Shows the partition value within the Kafka message output unless the partition type is
* schema-table-type. The default is false.
*
*
* @return Shows the partition value within the Kafka message output unless the partition type is
* schema-table-type. The default is false.
*/
public Boolean isIncludePartitionValue() {
return this.includePartitionValue;
}
/**
*
* Prefixes schema and table names to partition values, when the partition type is primary-key-type.
* Doing this increases data distribution among Kafka partitions. For example, suppose that a SysBench schema has
* thousands of tables and each table has only a limited range for a primary key. In this case, the same primary key
* is sent from thousands of tables to the same partition, which causes throttling. The default is false.
*
*
* @param partitionIncludeSchemaTable
* Prefixes schema and table names to partition values, when the partition type is
* primary-key-type. Doing this increases data distribution among Kafka partitions. For example,
* suppose that a SysBench schema has thousands of tables and each table has only a limited range for a primary
* key. In this case, the same primary key is sent from thousands of tables to the same partition, which
* causes throttling. The default is false.
*/
public void setPartitionIncludeSchemaTable(Boolean partitionIncludeSchemaTable) {
this.partitionIncludeSchemaTable = partitionIncludeSchemaTable;
}
/**
*
* Prefixes schema and table names to partition values, when the partition type is primary-key-type.
* Doing this increases data distribution among Kafka partitions. For example, suppose that a SysBench schema has
* thousands of tables and each table has only a limited range for a primary key. In this case, the same primary key
* is sent from thousands of tables to the same partition, which causes throttling. The default is false.
*
*
* @return Prefixes schema and table names to partition values, when the partition type is
* primary-key-type. Doing this increases data distribution among Kafka partitions. For
* example, suppose that a SysBench schema has thousands of tables and each table has only a limited range for
* a primary key. In this case, the same primary key is sent from thousands of tables to the same partition,
* which causes throttling. The default is false.
*/
public Boolean getPartitionIncludeSchemaTable() {
return this.partitionIncludeSchemaTable;
}
/**
*
* Prefixes schema and table names to partition values, when the partition type is primary-key-type.
* Doing this increases data distribution among Kafka partitions. For example, suppose that a SysBench schema has
* thousands of tables and each table has only a limited range for a primary key. In this case, the same primary key
* is sent from thousands of tables to the same partition, which causes throttling. The default is false.
*
*
* @param partitionIncludeSchemaTable
* Prefixes schema and table names to partition values, when the partition type is
* primary-key-type. Doing this increases data distribution among Kafka partitions. For example,
* suppose that a SysBench schema has thousands of tables and each table has only a limited range for a primary
* key. In this case, the same primary key is sent from thousands of tables to the same partition, which
* causes throttling. The default is false.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public KafkaSettings withPartitionIncludeSchemaTable(Boolean partitionIncludeSchemaTable) {
setPartitionIncludeSchemaTable(partitionIncludeSchemaTable);
return this;
}
/**
*
* Prefixes schema and table names to partition values, when the partition type is primary-key-type.
* Doing this increases data distribution among Kafka partitions. For example, suppose that a SysBench schema has
* thousands of tables and each table has only a limited range for a primary key. In this case, the same primary key
* is sent from thousands of tables to the same partition, which causes throttling. The default is false.
*
*
* @return Prefixes schema and table names to partition values, when the partition type is
* primary-key-type. Doing this increases data distribution among Kafka partitions. For
* example, suppose that a SysBench schema has thousands of tables and each table has only a limited range for
* a primary key. In this case, the same primary key is sent from thousands of tables to the same partition,
* which causes throttling. The default is false.
*/
public Boolean isPartitionIncludeSchemaTable() {
return this.partitionIncludeSchemaTable;
}
/**
*
* Includes any data definition language (DDL) operations that change the table in the control data, such as
* rename-table, drop-table, add-column, drop-column, and
* rename-column. The default is false.
*
*
* @param includeTableAlterOperations
* Includes any data definition language (DDL) operations that change the table in the control data, such as
* rename-table, drop-table, add-column, drop-column, and
* rename-column. The default is false.
*/
public void setIncludeTableAlterOperations(Boolean includeTableAlterOperations) {
this.includeTableAlterOperations = includeTableAlterOperations;
}
/**
*
* Includes any data definition language (DDL) operations that change the table in the control data, such as
* rename-table, drop-table, add-column, drop-column, and
* rename-column. The default is false.
*
*
* @return Includes any data definition language (DDL) operations that change the table in the control data, such as
* rename-table, drop-table, add-column, drop-column,
* and rename-column. The default is false.
*/
public Boolean getIncludeTableAlterOperations() {
return this.includeTableAlterOperations;
}
/**
*
* Includes any data definition language (DDL) operations that change the table in the control data, such as
* rename-table, drop-table, add-column, drop-column, and
* rename-column. The default is false.
*
*
* @param includeTableAlterOperations
* Includes any data definition language (DDL) operations that change the table in the control data, such as
* rename-table, drop-table, add-column, drop-column, and
* rename-column. The default is false.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public KafkaSettings withIncludeTableAlterOperations(Boolean includeTableAlterOperations) {
setIncludeTableAlterOperations(includeTableAlterOperations);
return this;
}
/**
*
* Includes any data definition language (DDL) operations that change the table in the control data, such as
* rename-table, drop-table, add-column, drop-column, and
* rename-column. The default is false.
*
*
* @return Includes any data definition language (DDL) operations that change the table in the control data, such as
* rename-table, drop-table, add-column, drop-column,
* and rename-column. The default is false.
*/
public Boolean isIncludeTableAlterOperations() {
return this.includeTableAlterOperations;
}
/**
*
* Shows detailed control information for table definition, column definition, and table and column changes in the
* Kafka message output. The default is false.
*
*
* @param includeControlDetails
* Shows detailed control information for table definition, column definition, and table and column changes
* in the Kafka message output. The default is false.
*/
public void setIncludeControlDetails(Boolean includeControlDetails) {
this.includeControlDetails = includeControlDetails;
}
/**
*
* Shows detailed control information for table definition, column definition, and table and column changes in the
* Kafka message output. The default is false.
*
*
* @return Shows detailed control information for table definition, column definition, and table and column changes
* in the Kafka message output. The default is false.
*/
public Boolean getIncludeControlDetails() {
return this.includeControlDetails;
}
/**
*
* Shows detailed control information for table definition, column definition, and table and column changes in the
* Kafka message output. The default is false.
*
*
* @param includeControlDetails
* Shows detailed control information for table definition, column definition, and table and column changes
* in the Kafka message output. The default is false.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public KafkaSettings withIncludeControlDetails(Boolean includeControlDetails) {
setIncludeControlDetails(includeControlDetails);
return this;
}
/**
*
* Shows detailed control information for table definition, column definition, and table and column changes in the
* Kafka message output. The default is false.
*
*
* @return Shows detailed control information for table definition, column definition, and table and column changes
* in the Kafka message output. The default is false.
*/
public Boolean isIncludeControlDetails() {
return this.includeControlDetails;
}
/**
*
* The maximum size in bytes for records created on the endpoint. The default is 1,000,000.
*
*
* @param messageMaxBytes
* The maximum size in bytes for records created on the endpoint. The default is 1,000,000.
*/
public void setMessageMaxBytes(Integer messageMaxBytes) {
this.messageMaxBytes = messageMaxBytes;
}
/**
*
* The maximum size in bytes for records created on the endpoint. The default is 1,000,000.
*
*
* @return The maximum size in bytes for records created on the endpoint. The default is 1,000,000.
*/
public Integer getMessageMaxBytes() {
return this.messageMaxBytes;
}
/**
*
* The maximum size in bytes for records created on the endpoint. The default is 1,000,000.
*
*
* @param messageMaxBytes
* The maximum size in bytes for records created on the endpoint. The default is 1,000,000.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public KafkaSettings withMessageMaxBytes(Integer messageMaxBytes) {
setMessageMaxBytes(messageMaxBytes);
return this;
}
/**
*
* Include NULL and empty columns for records migrated to the endpoint. The default is false.
*
*
* @param includeNullAndEmpty
* Include NULL and empty columns for records migrated to the endpoint. The default is false.
*/
public void setIncludeNullAndEmpty(Boolean includeNullAndEmpty) {
this.includeNullAndEmpty = includeNullAndEmpty;
}
/**
*
* Include NULL and empty columns for records migrated to the endpoint. The default is false.
*
*
* @return Include NULL and empty columns for records migrated to the endpoint. The default is false.
*/
public Boolean getIncludeNullAndEmpty() {
return this.includeNullAndEmpty;
}
/**
*
* Include NULL and empty columns for records migrated to the endpoint. The default is false.
*
*
* @param includeNullAndEmpty
* Include NULL and empty columns for records migrated to the endpoint. The default is false.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public KafkaSettings withIncludeNullAndEmpty(Boolean includeNullAndEmpty) {
setIncludeNullAndEmpty(includeNullAndEmpty);
return this;
}
/**
*
* Include NULL and empty columns for records migrated to the endpoint. The default is false.
*
*
* @return Include NULL and empty columns for records migrated to the endpoint. The default is false.
*/
public Boolean isIncludeNullAndEmpty() {
return this.includeNullAndEmpty;
}
/**
*
* Set secure connection to a Kafka target endpoint using Transport Layer Security (TLS). Options include
* ssl-encryption, ssl-authentication, and sasl-ssl. sasl-ssl
* requires SaslUsername and SaslPassword.
*
*
* @param securityProtocol
* Set secure connection to a Kafka target endpoint using Transport Layer Security (TLS). Options include
* ssl-encryption, ssl-authentication, and sasl-ssl.
* sasl-ssl requires SaslUsername and SaslPassword.
* @see KafkaSecurityProtocol
*/
public void setSecurityProtocol(String securityProtocol) {
this.securityProtocol = securityProtocol;
}
/**
*
* Set secure connection to a Kafka target endpoint using Transport Layer Security (TLS). Options include
* ssl-encryption, ssl-authentication, and sasl-ssl. sasl-ssl
* requires SaslUsername and SaslPassword.
*
*
* @return Set secure connection to a Kafka target endpoint using Transport Layer Security (TLS). Options include
* ssl-encryption, ssl-authentication, and sasl-ssl.
* sasl-ssl requires SaslUsername and SaslPassword.
* @see KafkaSecurityProtocol
*/
public String getSecurityProtocol() {
return this.securityProtocol;
}
/**
*
* Set secure connection to a Kafka target endpoint using Transport Layer Security (TLS). Options include
* ssl-encryption, ssl-authentication, and sasl-ssl. sasl-ssl
* requires SaslUsername and SaslPassword.
*
*
* @param securityProtocol
* Set secure connection to a Kafka target endpoint using Transport Layer Security (TLS). Options include
* ssl-encryption, ssl-authentication, and sasl-ssl.
* sasl-ssl requires SaslUsername and SaslPassword.
* @return Returns a reference to this object so that method calls can be chained together.
* @see KafkaSecurityProtocol
*/
public KafkaSettings withSecurityProtocol(String securityProtocol) {
setSecurityProtocol(securityProtocol);
return this;
}
/**
*
* Set secure connection to a Kafka target endpoint using Transport Layer Security (TLS). Options include
* ssl-encryption, ssl-authentication, and sasl-ssl. sasl-ssl
* requires SaslUsername and SaslPassword.
*
*
* @param securityProtocol
* Set secure connection to a Kafka target endpoint using Transport Layer Security (TLS). Options include
* ssl-encryption, ssl-authentication, and sasl-ssl.
* sasl-ssl requires SaslUsername and SaslPassword.
* @return Returns a reference to this object so that method calls can be chained together.
* @see KafkaSecurityProtocol
*/
public KafkaSettings withSecurityProtocol(KafkaSecurityProtocol securityProtocol) {
this.securityProtocol = securityProtocol.toString();
return this;
}
/**
*
* The Amazon Resource Name (ARN) of the client certificate used to securely connect to a Kafka target endpoint.
*
*
* @param sslClientCertificateArn
* The Amazon Resource Name (ARN) of the client certificate used to securely connect to a Kafka target
* endpoint.
*/
public void setSslClientCertificateArn(String sslClientCertificateArn) {
this.sslClientCertificateArn = sslClientCertificateArn;
}
/**
*
* The Amazon Resource Name (ARN) of the client certificate used to securely connect to a Kafka target endpoint.
*
*
* @return The Amazon Resource Name (ARN) of the client certificate used to securely connect to a Kafka target
* endpoint.
*/
public String getSslClientCertificateArn() {
return this.sslClientCertificateArn;
}
/**
*
* The Amazon Resource Name (ARN) of the client certificate used to securely connect to a Kafka target endpoint.
*
*
* @param sslClientCertificateArn
* The Amazon Resource Name (ARN) of the client certificate used to securely connect to a Kafka target
* endpoint.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public KafkaSettings withSslClientCertificateArn(String sslClientCertificateArn) {
setSslClientCertificateArn(sslClientCertificateArn);
return this;
}
/**
*
* The Amazon Resource Name (ARN) for the client private key used to securely connect to a Kafka target endpoint.
*
*
* @param sslClientKeyArn
* The Amazon Resource Name (ARN) for the client private key used to securely connect to a Kafka target
* endpoint.
*/
public void setSslClientKeyArn(String sslClientKeyArn) {
this.sslClientKeyArn = sslClientKeyArn;
}
/**
*
* The Amazon Resource Name (ARN) for the client private key used to securely connect to a Kafka target endpoint.
*
*
* @return The Amazon Resource Name (ARN) for the client private key used to securely connect to a Kafka target
* endpoint.
*/
public String getSslClientKeyArn() {
return this.sslClientKeyArn;
}
/**
*
* The Amazon Resource Name (ARN) for the client private key used to securely connect to a Kafka target endpoint.
*
*
* @param sslClientKeyArn
* The Amazon Resource Name (ARN) for the client private key used to securely connect to a Kafka target
* endpoint.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public KafkaSettings withSslClientKeyArn(String sslClientKeyArn) {
setSslClientKeyArn(sslClientKeyArn);
return this;
}
/**
*
* The password for the client private key used to securely connect to a Kafka target endpoint.
*
*
* @param sslClientKeyPassword
* The password for the client private key used to securely connect to a Kafka target endpoint.
*/
public void setSslClientKeyPassword(String sslClientKeyPassword) {
this.sslClientKeyPassword = sslClientKeyPassword;
}
/**
*
* The password for the client private key used to securely connect to a Kafka target endpoint.
*
*
* @return The password for the client private key used to securely connect to a Kafka target endpoint.
*/
public String getSslClientKeyPassword() {
return this.sslClientKeyPassword;
}
/**
*
* The password for the client private key used to securely connect to a Kafka target endpoint.
*
*
* @param sslClientKeyPassword
* The password for the client private key used to securely connect to a Kafka target endpoint.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public KafkaSettings withSslClientKeyPassword(String sslClientKeyPassword) {
setSslClientKeyPassword(sslClientKeyPassword);
return this;
}
/**
*
* The Amazon Resource Name (ARN) for the private certificate authority (CA) cert that DMS uses to securely connect
* to your Kafka target endpoint.
*
*
* @param sslCaCertificateArn
* The Amazon Resource Name (ARN) for the private certificate authority (CA) cert that DMS uses to securely
* connect to your Kafka target endpoint.
*/
public void setSslCaCertificateArn(String sslCaCertificateArn) {
this.sslCaCertificateArn = sslCaCertificateArn;
}
/**
*
* The Amazon Resource Name (ARN) for the private certificate authority (CA) cert that DMS uses to securely connect
* to your Kafka target endpoint.
*
*
* @return The Amazon Resource Name (ARN) for the private certificate authority (CA) cert that DMS uses to securely
* connect to your Kafka target endpoint.
*/
public String getSslCaCertificateArn() {
return this.sslCaCertificateArn;
}
/**
*
* The Amazon Resource Name (ARN) for the private certificate authority (CA) cert that DMS uses to securely connect
* to your Kafka target endpoint.
*
*
* @param sslCaCertificateArn
* The Amazon Resource Name (ARN) for the private certificate authority (CA) cert that DMS uses to securely
* connect to your Kafka target endpoint.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public KafkaSettings withSslCaCertificateArn(String sslCaCertificateArn) {
setSslCaCertificateArn(sslCaCertificateArn);
return this;
}
/**
*
* The secure user name you created when you first set up your MSK cluster to validate a client identity and make an
* encrypted connection between server and client using SASL-SSL authentication.
*
*
* @param saslUsername
* The secure user name you created when you first set up your MSK cluster to validate a client identity and
* make an encrypted connection between server and client using SASL-SSL authentication.
*/
public void setSaslUsername(String saslUsername) {
this.saslUsername = saslUsername;
}
/**
*
* The secure user name you created when you first set up your MSK cluster to validate a client identity and make an
* encrypted connection between server and client using SASL-SSL authentication.
*
*
* @return The secure user name you created when you first set up your MSK cluster to validate a client identity and
* make an encrypted connection between server and client using SASL-SSL authentication.
*/
public String getSaslUsername() {
return this.saslUsername;
}
/**
*
* The secure user name you created when you first set up your MSK cluster to validate a client identity and make an
* encrypted connection between server and client using SASL-SSL authentication.
*
*
* @param saslUsername
* The secure user name you created when you first set up your MSK cluster to validate a client identity and
* make an encrypted connection between server and client using SASL-SSL authentication.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public KafkaSettings withSaslUsername(String saslUsername) {
setSaslUsername(saslUsername);
return this;
}
/**
*
* The secure password you created when you first set up your MSK cluster to validate a client identity and make an
* encrypted connection between server and client using SASL-SSL authentication.
*
*
* @param saslPassword
* The secure password you created when you first set up your MSK cluster to validate a client identity and
* make an encrypted connection between server and client using SASL-SSL authentication.
*/
public void setSaslPassword(String saslPassword) {
this.saslPassword = saslPassword;
}
/**
*
* The secure password you created when you first set up your MSK cluster to validate a client identity and make an
* encrypted connection between server and client using SASL-SSL authentication.
*
*
* @return The secure password you created when you first set up your MSK cluster to validate a client identity and
* make an encrypted connection between server and client using SASL-SSL authentication.
*/
public String getSaslPassword() {
return this.saslPassword;
}
/**
*
* The secure password you created when you first set up your MSK cluster to validate a client identity and make an
* encrypted connection between server and client using SASL-SSL authentication.
*
*
* @param saslPassword
* The secure password you created when you first set up your MSK cluster to validate a client identity and
* make an encrypted connection between server and client using SASL-SSL authentication.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public KafkaSettings withSaslPassword(String saslPassword) {
setSaslPassword(saslPassword);
return this;
}
/**
*
* Set this optional parameter to true to avoid adding a '0x' prefix to raw data in hexadecimal format.
* For example, by default, DMS adds a '0x' prefix to the LOB column type in hexadecimal format moving from an
* Oracle source to a Kafka target. Use the NoHexPrefix endpoint setting to enable migration of RAW
* data type columns without adding the '0x' prefix.
*
*
* @param noHexPrefix
* Set this optional parameter to true to avoid adding a '0x' prefix to raw data in hexadecimal
* format. For example, by default, DMS adds a '0x' prefix to the LOB column type in hexadecimal format
* moving from an Oracle source to a Kafka target. Use the NoHexPrefix endpoint setting to
* enable migration of RAW data type columns without adding the '0x' prefix.
*/
public void setNoHexPrefix(Boolean noHexPrefix) {
this.noHexPrefix = noHexPrefix;
}
/**
*
* Set this optional parameter to true to avoid adding a '0x' prefix to raw data in hexadecimal format.
* For example, by default, DMS adds a '0x' prefix to the LOB column type in hexadecimal format moving from an
* Oracle source to a Kafka target. Use the NoHexPrefix endpoint setting to enable migration of RAW
* data type columns without adding the '0x' prefix.
*
*
* @return Set this optional parameter to true to avoid adding a '0x' prefix to raw data in hexadecimal
* format. For example, by default, DMS adds a '0x' prefix to the LOB column type in hexadecimal format
* moving from an Oracle source to a Kafka target. Use the NoHexPrefix endpoint setting to
* enable migration of RAW data type columns without adding the '0x' prefix.
*/
public Boolean getNoHexPrefix() {
return this.noHexPrefix;
}
/**
*
* Set this optional parameter to true to avoid adding a '0x' prefix to raw data in hexadecimal format.
* For example, by default, DMS adds a '0x' prefix to the LOB column type in hexadecimal format moving from an
* Oracle source to a Kafka target. Use the NoHexPrefix endpoint setting to enable migration of RAW
* data type columns without adding the '0x' prefix.
*
*
* @param noHexPrefix
* Set this optional parameter to true to avoid adding a '0x' prefix to raw data in hexadecimal
* format. For example, by default, DMS adds a '0x' prefix to the LOB column type in hexadecimal format
* moving from an Oracle source to a Kafka target. Use the NoHexPrefix endpoint setting to
* enable migration of RAW data type columns without adding the '0x' prefix.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public KafkaSettings withNoHexPrefix(Boolean noHexPrefix) {
setNoHexPrefix(noHexPrefix);
return this;
}
/**
*
* Set this optional parameter to true to avoid adding a '0x' prefix to raw data in hexadecimal format.
* For example, by default, DMS adds a '0x' prefix to the LOB column type in hexadecimal format moving from an
* Oracle source to a Kafka target. Use the NoHexPrefix endpoint setting to enable migration of RAW
* data type columns without adding the '0x' prefix.
*
*
* @return Set this optional parameter to true to avoid adding a '0x' prefix to raw data in hexadecimal
* format. For example, by default, DMS adds a '0x' prefix to the LOB column type in hexadecimal format
* moving from an Oracle source to a Kafka target. Use the NoHexPrefix endpoint setting to
* enable migration of RAW data type columns without adding the '0x' prefix.
*/
public Boolean isNoHexPrefix() {
return this.noHexPrefix;
}
/**
* Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be
* redacted from this string using a placeholder value.
*
* @return A string representation of this object.
*
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("{");
if (getBroker() != null)
sb.append("Broker: ").append(getBroker()).append(",");
if (getTopic() != null)
sb.append("Topic: ").append(getTopic()).append(",");
if (getMessageFormat() != null)
sb.append("MessageFormat: ").append(getMessageFormat()).append(",");
if (getIncludeTransactionDetails() != null)
sb.append("IncludeTransactionDetails: ").append(getIncludeTransactionDetails()).append(",");
if (getIncludePartitionValue() != null)
sb.append("IncludePartitionValue: ").append(getIncludePartitionValue()).append(",");
if (getPartitionIncludeSchemaTable() != null)
sb.append("PartitionIncludeSchemaTable: ").append(getPartitionIncludeSchemaTable()).append(",");
if (getIncludeTableAlterOperations() != null)
sb.append("IncludeTableAlterOperations: ").append(getIncludeTableAlterOperations()).append(",");
if (getIncludeControlDetails() != null)
sb.append("IncludeControlDetails: ").append(getIncludeControlDetails()).append(",");
if (getMessageMaxBytes() != null)
sb.append("MessageMaxBytes: ").append(getMessageMaxBytes()).append(",");
if (getIncludeNullAndEmpty() != null)
sb.append("IncludeNullAndEmpty: ").append(getIncludeNullAndEmpty()).append(",");
if (getSecurityProtocol() != null)
sb.append("SecurityProtocol: ").append(getSecurityProtocol()).append(",");
if (getSslClientCertificateArn() != null)
sb.append("SslClientCertificateArn: ").append(getSslClientCertificateArn()).append(",");
if (getSslClientKeyArn() != null)
sb.append("SslClientKeyArn: ").append(getSslClientKeyArn()).append(",");
if (getSslClientKeyPassword() != null)
sb.append("SslClientKeyPassword: ").append("***Sensitive Data Redacted***").append(",");
if (getSslCaCertificateArn() != null)
sb.append("SslCaCertificateArn: ").append(getSslCaCertificateArn()).append(",");
if (getSaslUsername() != null)
sb.append("SaslUsername: ").append(getSaslUsername()).append(",");
if (getSaslPassword() != null)
sb.append("SaslPassword: ").append("***Sensitive Data Redacted***").append(",");
if (getNoHexPrefix() != null)
sb.append("NoHexPrefix: ").append(getNoHexPrefix());
sb.append("}");
return sb.toString();
}
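/*
 * Illustrative sketch: as documented above, sensitive fields are replaced with a placeholder
 * in the string form, so logging a populated instance does not expose credentials. The values
 * below are placeholders.
 *
 *   KafkaSettings settings = new KafkaSettings()
 *           .withSaslUsername("dms-client")
 *           .withSaslPassword("example-password");
 *   System.out.println(settings);
 *   // prints: {SaslUsername: dms-client,SaslPassword: ***Sensitive Data Redacted***}
 */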
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (obj instanceof KafkaSettings == false)
return false;
KafkaSettings other = (KafkaSettings) obj;
if (other.getBroker() == null ^ this.getBroker() == null)
return false;
if (other.getBroker() != null && other.getBroker().equals(this.getBroker()) == false)
return false;
if (other.getTopic() == null ^ this.getTopic() == null)
return false;
if (other.getTopic() != null && other.getTopic().equals(this.getTopic()) == false)
return false;
if (other.getMessageFormat() == null ^ this.getMessageFormat() == null)
return false;
if (other.getMessageFormat() != null && other.getMessageFormat().equals(this.getMessageFormat()) == false)
return false;
if (other.getIncludeTransactionDetails() == null ^ this.getIncludeTransactionDetails() == null)
return false;
if (other.getIncludeTransactionDetails() != null && other.getIncludeTransactionDetails().equals(this.getIncludeTransactionDetails()) == false)
return false;
if (other.getIncludePartitionValue() == null ^ this.getIncludePartitionValue() == null)
return false;
if (other.getIncludePartitionValue() != null && other.getIncludePartitionValue().equals(this.getIncludePartitionValue()) == false)
return false;
if (other.getPartitionIncludeSchemaTable() == null ^ this.getPartitionIncludeSchemaTable() == null)
return false;
if (other.getPartitionIncludeSchemaTable() != null && other.getPartitionIncludeSchemaTable().equals(this.getPartitionIncludeSchemaTable()) == false)
return false;
if (other.getIncludeTableAlterOperations() == null ^ this.getIncludeTableAlterOperations() == null)
return false;
if (other.getIncludeTableAlterOperations() != null && other.getIncludeTableAlterOperations().equals(this.getIncludeTableAlterOperations()) == false)
return false;
if (other.getIncludeControlDetails() == null ^ this.getIncludeControlDetails() == null)
return false;
if (other.getIncludeControlDetails() != null && other.getIncludeControlDetails().equals(this.getIncludeControlDetails()) == false)
return false;
if (other.getMessageMaxBytes() == null ^ this.getMessageMaxBytes() == null)
return false;
if (other.getMessageMaxBytes() != null && other.getMessageMaxBytes().equals(this.getMessageMaxBytes()) == false)
return false;
if (other.getIncludeNullAndEmpty() == null ^ this.getIncludeNullAndEmpty() == null)
return false;
if (other.getIncludeNullAndEmpty() != null && other.getIncludeNullAndEmpty().equals(this.getIncludeNullAndEmpty()) == false)
return false;
if (other.getSecurityProtocol() == null ^ this.getSecurityProtocol() == null)
return false;
if (other.getSecurityProtocol() != null && other.getSecurityProtocol().equals(this.getSecurityProtocol()) == false)
return false;
if (other.getSslClientCertificateArn() == null ^ this.getSslClientCertificateArn() == null)
return false;
if (other.getSslClientCertificateArn() != null && other.getSslClientCertificateArn().equals(this.getSslClientCertificateArn()) == false)
return false;
if (other.getSslClientKeyArn() == null ^ this.getSslClientKeyArn() == null)
return false;
if (other.getSslClientKeyArn() != null && other.getSslClientKeyArn().equals(this.getSslClientKeyArn()) == false)
return false;
if (other.getSslClientKeyPassword() == null ^ this.getSslClientKeyPassword() == null)
return false;
if (other.getSslClientKeyPassword() != null && other.getSslClientKeyPassword().equals(this.getSslClientKeyPassword()) == false)
return false;
if (other.getSslCaCertificateArn() == null ^ this.getSslCaCertificateArn() == null)
return false;
if (other.getSslCaCertificateArn() != null && other.getSslCaCertificateArn().equals(this.getSslCaCertificateArn()) == false)
return false;
if (other.getSaslUsername() == null ^ this.getSaslUsername() == null)
return false;
if (other.getSaslUsername() != null && other.getSaslUsername().equals(this.getSaslUsername()) == false)
return false;
if (other.getSaslPassword() == null ^ this.getSaslPassword() == null)
return false;
if (other.getSaslPassword() != null && other.getSaslPassword().equals(this.getSaslPassword()) == false)
return false;
if (other.getNoHexPrefix() == null ^ this.getNoHexPrefix() == null)
return false;
if (other.getNoHexPrefix() != null && other.getNoHexPrefix().equals(this.getNoHexPrefix()) == false)
return false;
return true;
}
@Override
public int hashCode() {
final int prime = 31;
int hashCode = 1;
hashCode = prime * hashCode + ((getBroker() == null) ? 0 : getBroker().hashCode());
hashCode = prime * hashCode + ((getTopic() == null) ? 0 : getTopic().hashCode());
hashCode = prime * hashCode + ((getMessageFormat() == null) ? 0 : getMessageFormat().hashCode());
hashCode = prime * hashCode + ((getIncludeTransactionDetails() == null) ? 0 : getIncludeTransactionDetails().hashCode());
hashCode = prime * hashCode + ((getIncludePartitionValue() == null) ? 0 : getIncludePartitionValue().hashCode());
hashCode = prime * hashCode + ((getPartitionIncludeSchemaTable() == null) ? 0 : getPartitionIncludeSchemaTable().hashCode());
hashCode = prime * hashCode + ((getIncludeTableAlterOperations() == null) ? 0 : getIncludeTableAlterOperations().hashCode());
hashCode = prime * hashCode + ((getIncludeControlDetails() == null) ? 0 : getIncludeControlDetails().hashCode());
hashCode = prime * hashCode + ((getMessageMaxBytes() == null) ? 0 : getMessageMaxBytes().hashCode());
hashCode = prime * hashCode + ((getIncludeNullAndEmpty() == null) ? 0 : getIncludeNullAndEmpty().hashCode());
hashCode = prime * hashCode + ((getSecurityProtocol() == null) ? 0 : getSecurityProtocol().hashCode());
hashCode = prime * hashCode + ((getSslClientCertificateArn() == null) ? 0 : getSslClientCertificateArn().hashCode());
hashCode = prime * hashCode + ((getSslClientKeyArn() == null) ? 0 : getSslClientKeyArn().hashCode());
hashCode = prime * hashCode + ((getSslClientKeyPassword() == null) ? 0 : getSslClientKeyPassword().hashCode());
hashCode = prime * hashCode + ((getSslCaCertificateArn() == null) ? 0 : getSslCaCertificateArn().hashCode());
hashCode = prime * hashCode + ((getSaslUsername() == null) ? 0 : getSaslUsername().hashCode());
hashCode = prime * hashCode + ((getSaslPassword() == null) ? 0 : getSaslPassword().hashCode());
hashCode = prime * hashCode + ((getNoHexPrefix() == null) ? 0 : getNoHexPrefix().hashCode());
return hashCode;
}
@Override
public KafkaSettings clone() {
try {
return (KafkaSettings) super.clone();
} catch (CloneNotSupportedException e) {
throw new IllegalStateException("Got a CloneNotSupportedException from Object.clone() " + "even though we're Cloneable!", e);
}
}
@com.amazonaws.annotation.SdkInternalApi
@Override
public void marshall(ProtocolMarshaller protocolMarshaller) {
com.amazonaws.services.databasemigrationservice.model.transform.KafkaSettingsMarshaller.getInstance().marshall(this, protocolMarshaller);
}
}