
/*
* Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.databasemigrationservice.model;
import java.io.Serializable;
import javax.annotation.Generated;
import com.amazonaws.protocol.StructuredPojo;
import com.amazonaws.protocol.ProtocolMarshaller;
/**
*
* Provides information that describes an Apache Kafka endpoint. This information includes the output format of records
* applied to the endpoint and details of transaction and control table data information.
*
*
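* A minimal usage sketch (the broker address, topic name, and flag values below are placeholders; the surrounding
* endpoint and replication configuration is assumed):
*
* <pre>
* KafkaSettings kafkaSettings = new KafkaSettings()
*         .withBroker("ec2-12-345-678-901.compute-1.amazonaws.com:2345")
*         .withTopic("my-migration-topic")
*         .withIncludeTransactionDetails(true)
*         .withIncludePartitionValue(true);
* </pre>
*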
* @see AWS API Documentation
*/
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class KafkaSettings implements Serializable, Cloneable, StructuredPojo {
/**
*
* The broker location and port of the Kafka broker that hosts your Kafka instance. Specify the broker in the form
* <code>broker-hostname-or-ip:port</code>. For example,
* <code>"ec2-12-345-678-901.compute-1.amazonaws.com:2345"</code>.
*
*/
private String broker;
/**
*
* The topic to which you migrate the data. If you don't specify a topic, AWS DMS specifies
* <code>"kafka-default-topic"</code> as the migration topic.
*
*/
private String topic;
/**
*
* The output format for the records created on the endpoint. The message format is <code>JSON</code> (default) or
* <code>JSON_UNFORMATTED</code> (a single line with no tab).
*
*/
private String messageFormat;
/**
*
* Provides detailed transaction information from the source database. This information includes a commit
* timestamp, a log position, and values for <code>transaction_id</code>, previous <code>transaction_id</code>, and
* <code>transaction_record_id</code> (the record offset within a transaction). The default is <code>false</code>.
*
*/
private Boolean includeTransactionDetails;
/**
*
* Shows the partition value within the Kafka message output, unless the partition type is
* <code>schema-table-type</code>. The default is <code>false</code>.
*
*/
private Boolean includePartitionValue;
/**
*
* Prefixes schema and table names to partition values, when the partition type is <code>primary-key-type</code>.
* Doing this increases data distribution among Kafka partitions. For example, suppose that a SysBench schema has
* thousands of tables and each table has only limited range for a primary key. In this case, the same primary key
* is sent from thousands of tables to the same partition, which causes throttling. The default is
* <code>false</code>.
*
*/
private Boolean partitionIncludeSchemaTable;
/**
*
* Includes any data definition language (DDL) operations that change the table in the control data, such as
* <code>rename-table</code>, <code>drop-table</code>, <code>add-column</code>, <code>drop-column</code>, and
* <code>rename-column</code>. The default is <code>false</code>.
*
*/
private Boolean includeTableAlterOperations;
/**
*
* Shows detailed control information for table definition, column definition, and table and column changes in the
* Kafka message output. The default is <code>false</code>.
*
*/
private Boolean includeControlDetails;
/**
*
* The maximum size in bytes for records created on the endpoint. The default is 1,000,000.
*
*/
private Integer messageMaxBytes;
/**
*
* Include NULL and empty columns for records migrated to the endpoint. The default is <code>false</code>.
*
*/
private Boolean includeNullAndEmpty;
/**
*
* The broker location and port of the Kafka broker that hosts your Kafka instance. Specify the broker in the form
* <code>broker-hostname-or-ip:port</code>. For example,
* <code>"ec2-12-345-678-901.compute-1.amazonaws.com:2345"</code>.
*
*
* @param broker
* The broker location and port of the Kafka broker that hosts your Kafka instance. Specify the broker in the
* form <code>broker-hostname-or-ip:port</code>. For example,
* <code>"ec2-12-345-678-901.compute-1.amazonaws.com:2345"</code>.
*/
public void setBroker(String broker) {
this.broker = broker;
}
/**
*
* The broker location and port of the Kafka broker that hosts your Kafka instance. Specify the broker in the form
* <code>broker-hostname-or-ip:port</code>. For example,
* <code>"ec2-12-345-678-901.compute-1.amazonaws.com:2345"</code>.
*
*
* @return The broker location and port of the Kafka broker that hosts your Kafka instance. Specify the broker in
* the form <code>broker-hostname-or-ip:port</code>. For example,
* <code>"ec2-12-345-678-901.compute-1.amazonaws.com:2345"</code>.
*/
public String getBroker() {
return this.broker;
}
/**
*
* The broker location and port of the Kafka broker that hosts your Kafka instance. Specify the broker in the form
* <code>broker-hostname-or-ip:port</code>. For example,
* <code>"ec2-12-345-678-901.compute-1.amazonaws.com:2345"</code>.
*
*
* @param broker
* The broker location and port of the Kafka broker that hosts your Kafka instance. Specify the broker in the
* form <code>broker-hostname-or-ip:port</code>. For example,
* <code>"ec2-12-345-678-901.compute-1.amazonaws.com:2345"</code>.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public KafkaSettings withBroker(String broker) {
setBroker(broker);
return this;
}
/**
*
* The topic to which you migrate the data. If you don't specify a topic, AWS DMS specifies
* <code>"kafka-default-topic"</code> as the migration topic.
*
*
* @param topic
* The topic to which you migrate the data. If you don't specify a topic, AWS DMS specifies
* <code>"kafka-default-topic"</code> as the migration topic.
*/
public void setTopic(String topic) {
this.topic = topic;
}
/**
*
* The topic to which you migrate the data. If you don't specify a topic, AWS DMS specifies
* <code>"kafka-default-topic"</code> as the migration topic.
*
*
* @return The topic to which you migrate the data. If you don't specify a topic, AWS DMS specifies
* <code>"kafka-default-topic"</code> as the migration topic.
*/
public String getTopic() {
return this.topic;
}
/**
*
* The topic to which you migrate the data. If you don't specify a topic, AWS DMS specifies
* <code>"kafka-default-topic"</code> as the migration topic.
*
*
* @param topic
* The topic to which you migrate the data. If you don't specify a topic, AWS DMS specifies
* <code>"kafka-default-topic"</code> as the migration topic.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public KafkaSettings withTopic(String topic) {
setTopic(topic);
return this;
}
/**
*
* The output format for the records created on the endpoint. The message format is <code>JSON</code> (default) or
* <code>JSON_UNFORMATTED</code> (a single line with no tab).
*
*
* @param messageFormat
* The output format for the records created on the endpoint. The message format is <code>JSON</code>
* (default) or <code>JSON_UNFORMATTED</code> (a single line with no tab).
* @see MessageFormatValue
*/
public void setMessageFormat(String messageFormat) {
this.messageFormat = messageFormat;
}
/**
*
* The output format for the records created on the endpoint. The message format is <code>JSON</code> (default) or
* <code>JSON_UNFORMATTED</code> (a single line with no tab).
*
*
* @return The output format for the records created on the endpoint. The message format is <code>JSON</code>
* (default) or <code>JSON_UNFORMATTED</code> (a single line with no tab).
* @see MessageFormatValue
*/
public String getMessageFormat() {
return this.messageFormat;
}
/**
*
* The output format for the records created on the endpoint. The message format is <code>JSON</code> (default) or
* <code>JSON_UNFORMATTED</code> (a single line with no tab).
*
*
* @param messageFormat
* The output format for the records created on the endpoint. The message format is <code>JSON</code>
* (default) or <code>JSON_UNFORMATTED</code> (a single line with no tab).
* @return Returns a reference to this object so that method calls can be chained together.
* @see MessageFormatValue
*/
public KafkaSettings withMessageFormat(String messageFormat) {
setMessageFormat(messageFormat);
return this;
}
/**
*
* The output format for the records created on the endpoint. The message format is <code>JSON</code> (default) or
* <code>JSON_UNFORMATTED</code> (a single line with no tab).
*
*
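* A minimal sketch of this typed overload; <code>MessageFormatValue.fromValue</code> maps the service's string
* form of the format (assumed here to be <code>"json-unformatted"</code>) to the enum constant:
*
* <pre>
* KafkaSettings settings = new KafkaSettings().withMessageFormat(MessageFormatValue.fromValue("json-unformatted"));
* </pre>
*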
* @param messageFormat
* The output format for the records created on the endpoint. The message format is <code>JSON</code>
* (default) or <code>JSON_UNFORMATTED</code> (a single line with no tab).
* @return Returns a reference to this object so that method calls can be chained together.
* @see MessageFormatValue
*/
public KafkaSettings withMessageFormat(MessageFormatValue messageFormat) {
this.messageFormat = messageFormat.toString();
return this;
}
/**
*
* Provides detailed transaction information from the source database. This information includes a commit
* timestamp, a log position, and values for <code>transaction_id</code>, previous <code>transaction_id</code>, and
* <code>transaction_record_id</code> (the record offset within a transaction). The default is <code>false</code>.
*
*
* @param includeTransactionDetails
* Provides detailed transaction information from the source database. This information includes a commit
* timestamp, a log position, and values for <code>transaction_id</code>, previous
* <code>transaction_id</code>, and <code>transaction_record_id</code> (the record offset within a
* transaction). The default is <code>false</code>.
*/
public void setIncludeTransactionDetails(Boolean includeTransactionDetails) {
this.includeTransactionDetails = includeTransactionDetails;
}
/**
*
* Provides detailed transaction information from the source database. This information includes a commit
* timestamp, a log position, and values for <code>transaction_id</code>, previous <code>transaction_id</code>, and
* <code>transaction_record_id</code> (the record offset within a transaction). The default is <code>false</code>.
*
*
* @return Provides detailed transaction information from the source database. This information includes a commit
* timestamp, a log position, and values for <code>transaction_id</code>, previous
* <code>transaction_id</code>, and <code>transaction_record_id</code> (the record offset within a
* transaction). The default is <code>false</code>.
*/
public Boolean getIncludeTransactionDetails() {
return this.includeTransactionDetails;
}
/**
*
* Provides detailed transaction information from the source database. This information includes a commit
* timestamp, a log position, and values for <code>transaction_id</code>, previous <code>transaction_id</code>, and
* <code>transaction_record_id</code> (the record offset within a transaction). The default is <code>false</code>.
*
*
* @param includeTransactionDetails
* Provides detailed transaction information from the source database. This information includes a commit
* timestamp, a log position, and values for <code>transaction_id</code>, previous
* <code>transaction_id</code>, and <code>transaction_record_id</code> (the record offset within a
* transaction). The default is <code>false</code>.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public KafkaSettings withIncludeTransactionDetails(Boolean includeTransactionDetails) {
setIncludeTransactionDetails(includeTransactionDetails);
return this;
}
/**
*
* Provides detailed transaction information from the source database. This information includes a commit
* timestamp, a log position, and values for <code>transaction_id</code>, previous <code>transaction_id</code>, and
* <code>transaction_record_id</code> (the record offset within a transaction). The default is <code>false</code>.
*
*
* @return Provides detailed transaction information from the source database. This information includes a commit
* timestamp, a log position, and values for <code>transaction_id</code>, previous
* <code>transaction_id</code>, and <code>transaction_record_id</code> (the record offset within a
* transaction). The default is <code>false</code>.
*/
public Boolean isIncludeTransactionDetails() {
return this.includeTransactionDetails;
}
/**
*
* Shows the partition value within the Kafka message output, unless the partition type is
* <code>schema-table-type</code>. The default is <code>false</code>.
*
*
* @param includePartitionValue
* Shows the partition value within the Kafka message output, unless the partition type is
* <code>schema-table-type</code>. The default is <code>false</code>.
*/
public void setIncludePartitionValue(Boolean includePartitionValue) {
this.includePartitionValue = includePartitionValue;
}
/**
*
* Shows the partition value within the Kafka message output, unless the partition type is
* <code>schema-table-type</code>. The default is <code>false</code>.
*
*
* @return Shows the partition value within the Kafka message output, unless the partition type is
* <code>schema-table-type</code>. The default is <code>false</code>.
*/
public Boolean getIncludePartitionValue() {
return this.includePartitionValue;
}
/**
*
* Shows the partition value within the Kafka message output, unless the partition type is
* <code>schema-table-type</code>. The default is <code>false</code>.
*
*
* @param includePartitionValue
* Shows the partition value within the Kafka message output, unless the partition type is
* <code>schema-table-type</code>. The default is <code>false</code>.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public KafkaSettings withIncludePartitionValue(Boolean includePartitionValue) {
setIncludePartitionValue(includePartitionValue);
return this;
}
/**
*
* Shows the partition value within the Kafka message output, unless the partition type is
* <code>schema-table-type</code>. The default is <code>false</code>.
*
*
* @return Shows the partition value within the Kafka message output, unless the partition type is
* <code>schema-table-type</code>. The default is <code>false</code>.
*/
public Boolean isIncludePartitionValue() {
return this.includePartitionValue;
}
/**
*
* Prefixes schema and table names to partition values, when the partition type is <code>primary-key-type</code>.
* Doing this increases data distribution among Kafka partitions. For example, suppose that a SysBench schema has
* thousands of tables and each table has only limited range for a primary key. In this case, the same primary key
* is sent from thousands of tables to the same partition, which causes throttling. The default is
* <code>false</code>.
*
*
* @param partitionIncludeSchemaTable
* Prefixes schema and table names to partition values, when the partition type is
* <code>primary-key-type</code>. Doing this increases data distribution among Kafka partitions. For example,
* suppose that a SysBench schema has thousands of tables and each table has only limited range for a primary
* key. In this case, the same primary key is sent from thousands of tables to the same partition, which
* causes throttling. The default is <code>false</code>.
*/
public void setPartitionIncludeSchemaTable(Boolean partitionIncludeSchemaTable) {
this.partitionIncludeSchemaTable = partitionIncludeSchemaTable;
}
/**
*
* Prefixes schema and table names to partition values, when the partition type is <code>primary-key-type</code>.
* Doing this increases data distribution among Kafka partitions. For example, suppose that a SysBench schema has
* thousands of tables and each table has only limited range for a primary key. In this case, the same primary key
* is sent from thousands of tables to the same partition, which causes throttling. The default is
* <code>false</code>.
*
*
* @return Prefixes schema and table names to partition values, when the partition type is
* <code>primary-key-type</code>. Doing this increases data distribution among Kafka partitions. For
* example, suppose that a SysBench schema has thousands of tables and each table has only limited range for
* a primary key. In this case, the same primary key is sent from thousands of tables to the same partition,
* which causes throttling. The default is <code>false</code>.
*/
public Boolean getPartitionIncludeSchemaTable() {
return this.partitionIncludeSchemaTable;
}
/**
*
* Prefixes schema and table names to partition values, when the partition type is <code>primary-key-type</code>.
* Doing this increases data distribution among Kafka partitions. For example, suppose that a SysBench schema has
* thousands of tables and each table has only limited range for a primary key. In this case, the same primary key
* is sent from thousands of tables to the same partition, which causes throttling. The default is
* <code>false</code>.
*
*
* @param partitionIncludeSchemaTable
* Prefixes schema and table names to partition values, when the partition type is
* <code>primary-key-type</code>. Doing this increases data distribution among Kafka partitions. For example,
* suppose that a SysBench schema has thousands of tables and each table has only limited range for a primary
* key. In this case, the same primary key is sent from thousands of tables to the same partition, which
* causes throttling. The default is <code>false</code>.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public KafkaSettings withPartitionIncludeSchemaTable(Boolean partitionIncludeSchemaTable) {
setPartitionIncludeSchemaTable(partitionIncludeSchemaTable);
return this;
}
/**
*
* Prefixes schema and table names to partition values, when the partition type is <code>primary-key-type</code>.
* Doing this increases data distribution among Kafka partitions. For example, suppose that a SysBench schema has
* thousands of tables and each table has only limited range for a primary key. In this case, the same primary key
* is sent from thousands of tables to the same partition, which causes throttling. The default is
* <code>false</code>.
*
*
* @return Prefixes schema and table names to partition values, when the partition type is
* <code>primary-key-type</code>. Doing this increases data distribution among Kafka partitions. For
* example, suppose that a SysBench schema has thousands of tables and each table has only limited range for
* a primary key. In this case, the same primary key is sent from thousands of tables to the same partition,
* which causes throttling. The default is <code>false</code>.
*/
public Boolean isPartitionIncludeSchemaTable() {
return this.partitionIncludeSchemaTable;
}
/**
*
* Includes any data definition language (DDL) operations that change the table in the control data, such as
* <code>rename-table</code>, <code>drop-table</code>, <code>add-column</code>, <code>drop-column</code>, and
* <code>rename-column</code>. The default is <code>false</code>.
*
*
* @param includeTableAlterOperations
* Includes any data definition language (DDL) operations that change the table in the control data, such as
* <code>rename-table</code>, <code>drop-table</code>, <code>add-column</code>, <code>drop-column</code>, and
* <code>rename-column</code>. The default is <code>false</code>.
*/
public void setIncludeTableAlterOperations(Boolean includeTableAlterOperations) {
this.includeTableAlterOperations = includeTableAlterOperations;
}
/**
*
* Includes any data definition language (DDL) operations that change the table in the control data, such as
* <code>rename-table</code>, <code>drop-table</code>, <code>add-column</code>, <code>drop-column</code>, and
* <code>rename-column</code>. The default is <code>false</code>.
*
*
* @return Includes any data definition language (DDL) operations that change the table in the control data, such
* as <code>rename-table</code>, <code>drop-table</code>, <code>add-column</code>, <code>drop-column</code>,
* and <code>rename-column</code>. The default is <code>false</code>.
*/
public Boolean getIncludeTableAlterOperations() {
return this.includeTableAlterOperations;
}
/**
*
* Includes any data definition language (DDL) operations that change the table in the control data, such as
* <code>rename-table</code>, <code>drop-table</code>, <code>add-column</code>, <code>drop-column</code>, and
* <code>rename-column</code>. The default is <code>false</code>.
*
*
* @param includeTableAlterOperations
* Includes any data definition language (DDL) operations that change the table in the control data, such as
* <code>rename-table</code>, <code>drop-table</code>, <code>add-column</code>, <code>drop-column</code>, and
* <code>rename-column</code>. The default is <code>false</code>.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public KafkaSettings withIncludeTableAlterOperations(Boolean includeTableAlterOperations) {
setIncludeTableAlterOperations(includeTableAlterOperations);
return this;
}
/**
*
* Includes any data definition language (DDL) operations that change the table in the control data, such as
* <code>rename-table</code>, <code>drop-table</code>, <code>add-column</code>, <code>drop-column</code>, and
* <code>rename-column</code>. The default is <code>false</code>.
*
*
* @return Includes any data definition language (DDL) operations that change the table in the control data, such
* as <code>rename-table</code>, <code>drop-table</code>, <code>add-column</code>, <code>drop-column</code>,
* and <code>rename-column</code>. The default is <code>false</code>.
*/
public Boolean isIncludeTableAlterOperations() {
return this.includeTableAlterOperations;
}
/**
*
* Shows detailed control information for table definition, column definition, and table and column changes in the
* Kafka message output. The default is <code>false</code>.
*
*
* @param includeControlDetails
* Shows detailed control information for table definition, column definition, and table and column changes
* in the Kafka message output. The default is <code>false</code>.
*/
public void setIncludeControlDetails(Boolean includeControlDetails) {
this.includeControlDetails = includeControlDetails;
}
/**
*
* Shows detailed control information for table definition, column definition, and table and column changes in the
* Kafka message output. The default is <code>false</code>.
*
*
* @return Shows detailed control information for table definition, column definition, and table and column changes
* in the Kafka message output. The default is <code>false</code>.
*/
public Boolean getIncludeControlDetails() {
return this.includeControlDetails;
}
/**
*
* Shows detailed control information for table definition, column definition, and table and column changes in the
* Kafka message output. The default is <code>false</code>.
*
*
* @param includeControlDetails
* Shows detailed control information for table definition, column definition, and table and column changes
* in the Kafka message output. The default is <code>false</code>.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public KafkaSettings withIncludeControlDetails(Boolean includeControlDetails) {
setIncludeControlDetails(includeControlDetails);
return this;
}
/**
*
* Shows detailed control information for table definition, column definition, and table and column changes in the
* Kafka message output. The default is <code>false</code>.
*
*
* @return Shows detailed control information for table definition, column definition, and table and column changes
* in the Kafka message output. The default is <code>false</code>.
*/
public Boolean isIncludeControlDetails() {
return this.includeControlDetails;
}
/**
*
* The maximum size in bytes for records created on the endpoint. The default is 1,000,000.
*
*
* @param messageMaxBytes
* The maximum size in bytes for records created on the endpoint. The default is 1,000,000.
*/
public void setMessageMaxBytes(Integer messageMaxBytes) {
this.messageMaxBytes = messageMaxBytes;
}
/**
*
* The maximum size in bytes for records created on the endpoint. The default is 1,000,000.
*
*
* @return The maximum size in bytes for records created on the endpoint. The default is 1,000,000.
*/
public Integer getMessageMaxBytes() {
return this.messageMaxBytes;
}
/**
*
* The maximum size in bytes for records created on the endpoint. The default is 1,000,000.
*
*
* @param messageMaxBytes
* The maximum size in bytes for records created on the endpoint. The default is 1,000,000.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public KafkaSettings withMessageMaxBytes(Integer messageMaxBytes) {
setMessageMaxBytes(messageMaxBytes);
return this;
}
/**
*
* Include NULL and empty columns for records migrated to the endpoint. The default is <code>false</code>.
*
*
* @param includeNullAndEmpty
* Include NULL and empty columns for records migrated to the endpoint. The default is <code>false</code>.
*/
public void setIncludeNullAndEmpty(Boolean includeNullAndEmpty) {
this.includeNullAndEmpty = includeNullAndEmpty;
}
/**
*
* Include NULL and empty columns for records migrated to the endpoint. The default is <code>false</code>.
*
*
* @return Include NULL and empty columns for records migrated to the endpoint. The default is <code>false</code>.
*/
public Boolean getIncludeNullAndEmpty() {
return this.includeNullAndEmpty;
}
/**
*
* Include NULL and empty columns for records migrated to the endpoint. The default is <code>false</code>.
*
*
* @param includeNullAndEmpty
* Include NULL and empty columns for records migrated to the endpoint. The default is <code>false</code>.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public KafkaSettings withIncludeNullAndEmpty(Boolean includeNullAndEmpty) {
setIncludeNullAndEmpty(includeNullAndEmpty);
return this;
}
/**
*
* Include NULL and empty columns for records migrated to the endpoint. The default is <code>false</code>.
*
*
* @return Include NULL and empty columns for records migrated to the endpoint. The default is <code>false</code>.
*/
public Boolean isIncludeNullAndEmpty() {
return this.includeNullAndEmpty;
}
/**
* Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be
* redacted from this string using a placeholder value.
*
* @return A string representation of this object.
*
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("{");
if (getBroker() != null)
sb.append("Broker: ").append(getBroker()).append(",");
if (getTopic() != null)
sb.append("Topic: ").append(getTopic()).append(",");
if (getMessageFormat() != null)
sb.append("MessageFormat: ").append(getMessageFormat()).append(",");
if (getIncludeTransactionDetails() != null)
sb.append("IncludeTransactionDetails: ").append(getIncludeTransactionDetails()).append(",");
if (getIncludePartitionValue() != null)
sb.append("IncludePartitionValue: ").append(getIncludePartitionValue()).append(",");
if (getPartitionIncludeSchemaTable() != null)
sb.append("PartitionIncludeSchemaTable: ").append(getPartitionIncludeSchemaTable()).append(",");
if (getIncludeTableAlterOperations() != null)
sb.append("IncludeTableAlterOperations: ").append(getIncludeTableAlterOperations()).append(",");
if (getIncludeControlDetails() != null)
sb.append("IncludeControlDetails: ").append(getIncludeControlDetails()).append(",");
if (getMessageMaxBytes() != null)
sb.append("MessageMaxBytes: ").append(getMessageMaxBytes()).append(",");
if (getIncludeNullAndEmpty() != null)
sb.append("IncludeNullAndEmpty: ").append(getIncludeNullAndEmpty());
sb.append("}");
return sb.toString();
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (obj instanceof KafkaSettings == false)
return false;
KafkaSettings other = (KafkaSettings) obj;
if (other.getBroker() == null ^ this.getBroker() == null)
return false;
if (other.getBroker() != null && other.getBroker().equals(this.getBroker()) == false)
return false;
if (other.getTopic() == null ^ this.getTopic() == null)
return false;
if (other.getTopic() != null && other.getTopic().equals(this.getTopic()) == false)
return false;
if (other.getMessageFormat() == null ^ this.getMessageFormat() == null)
return false;
if (other.getMessageFormat() != null && other.getMessageFormat().equals(this.getMessageFormat()) == false)
return false;
if (other.getIncludeTransactionDetails() == null ^ this.getIncludeTransactionDetails() == null)
return false;
if (other.getIncludeTransactionDetails() != null && other.getIncludeTransactionDetails().equals(this.getIncludeTransactionDetails()) == false)
return false;
if (other.getIncludePartitionValue() == null ^ this.getIncludePartitionValue() == null)
return false;
if (other.getIncludePartitionValue() != null && other.getIncludePartitionValue().equals(this.getIncludePartitionValue()) == false)
return false;
if (other.getPartitionIncludeSchemaTable() == null ^ this.getPartitionIncludeSchemaTable() == null)
return false;
if (other.getPartitionIncludeSchemaTable() != null && other.getPartitionIncludeSchemaTable().equals(this.getPartitionIncludeSchemaTable()) == false)
return false;
if (other.getIncludeTableAlterOperations() == null ^ this.getIncludeTableAlterOperations() == null)
return false;
if (other.getIncludeTableAlterOperations() != null && other.getIncludeTableAlterOperations().equals(this.getIncludeTableAlterOperations()) == false)
return false;
if (other.getIncludeControlDetails() == null ^ this.getIncludeControlDetails() == null)
return false;
if (other.getIncludeControlDetails() != null && other.getIncludeControlDetails().equals(this.getIncludeControlDetails()) == false)
return false;
if (other.getMessageMaxBytes() == null ^ this.getMessageMaxBytes() == null)
return false;
if (other.getMessageMaxBytes() != null && other.getMessageMaxBytes().equals(this.getMessageMaxBytes()) == false)
return false;
if (other.getIncludeNullAndEmpty() == null ^ this.getIncludeNullAndEmpty() == null)
return false;
if (other.getIncludeNullAndEmpty() != null && other.getIncludeNullAndEmpty().equals(this.getIncludeNullAndEmpty()) == false)
return false;
return true;
}
@Override
public int hashCode() {
final int prime = 31;
int hashCode = 1;
hashCode = prime * hashCode + ((getBroker() == null) ? 0 : getBroker().hashCode());
hashCode = prime * hashCode + ((getTopic() == null) ? 0 : getTopic().hashCode());
hashCode = prime * hashCode + ((getMessageFormat() == null) ? 0 : getMessageFormat().hashCode());
hashCode = prime * hashCode + ((getIncludeTransactionDetails() == null) ? 0 : getIncludeTransactionDetails().hashCode());
hashCode = prime * hashCode + ((getIncludePartitionValue() == null) ? 0 : getIncludePartitionValue().hashCode());
hashCode = prime * hashCode + ((getPartitionIncludeSchemaTable() == null) ? 0 : getPartitionIncludeSchemaTable().hashCode());
hashCode = prime * hashCode + ((getIncludeTableAlterOperations() == null) ? 0 : getIncludeTableAlterOperations().hashCode());
hashCode = prime * hashCode + ((getIncludeControlDetails() == null) ? 0 : getIncludeControlDetails().hashCode());
hashCode = prime * hashCode + ((getMessageMaxBytes() == null) ? 0 : getMessageMaxBytes().hashCode());
hashCode = prime * hashCode + ((getIncludeNullAndEmpty() == null) ? 0 : getIncludeNullAndEmpty().hashCode());
return hashCode;
}
@Override
public KafkaSettings clone() {
try {
return (KafkaSettings) super.clone();
} catch (CloneNotSupportedException e) {
throw new IllegalStateException("Got a CloneNotSupportedException from Object.clone() " + "even though we're Cloneable!", e);
}
}
@com.amazonaws.annotation.SdkInternalApi
@Override
public void marshall(ProtocolMarshaller protocolMarshaller) {
com.amazonaws.services.databasemigrationservice.model.transform.KafkaSettingsMarshaller.getInstance().marshall(this, protocolMarshaller);
}
}