// Source: software.amazon.awssdk.services.databasemigration.model.KafkaSettings
// (AWS SDK for Java v2, "databasemigration" module; generated model class)
/*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package software.amazon.awssdk.services.databasemigration.model;
import java.io.Serializable;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
import java.util.function.BiConsumer;
import java.util.function.Function;
import software.amazon.awssdk.annotations.Generated;
import software.amazon.awssdk.core.SdkField;
import software.amazon.awssdk.core.SdkPojo;
import software.amazon.awssdk.core.protocol.MarshallLocation;
import software.amazon.awssdk.core.protocol.MarshallingType;
import software.amazon.awssdk.core.traits.LocationTrait;
import software.amazon.awssdk.utils.ToString;
import software.amazon.awssdk.utils.builder.CopyableBuilder;
import software.amazon.awssdk.utils.builder.ToCopyableBuilder;
/**
*
* Provides information that describes an Apache Kafka endpoint. This information includes the output format of records
* applied to the endpoint and details of transaction and control table data information.
*
*/
@Generated("software.amazon.awssdk:codegen")
public final class KafkaSettings implements SdkPojo, Serializable, ToCopyableBuilder {
private static final SdkField BROKER_FIELD = SdkField. builder(MarshallingType.STRING)
.getter(getter(KafkaSettings::broker)).setter(setter(Builder::broker))
.traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("Broker").build()).build();
private static final SdkField TOPIC_FIELD = SdkField. builder(MarshallingType.STRING)
.getter(getter(KafkaSettings::topic)).setter(setter(Builder::topic))
.traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("Topic").build()).build();
private static final SdkField MESSAGE_FORMAT_FIELD = SdkField. builder(MarshallingType.STRING)
.getter(getter(KafkaSettings::messageFormatAsString)).setter(setter(Builder::messageFormat))
.traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("MessageFormat").build()).build();
private static final SdkField INCLUDE_TRANSACTION_DETAILS_FIELD = SdkField
. builder(MarshallingType.BOOLEAN).getter(getter(KafkaSettings::includeTransactionDetails))
.setter(setter(Builder::includeTransactionDetails))
.traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("IncludeTransactionDetails").build())
.build();
private static final SdkField INCLUDE_PARTITION_VALUE_FIELD = SdkField. builder(MarshallingType.BOOLEAN)
.getter(getter(KafkaSettings::includePartitionValue)).setter(setter(Builder::includePartitionValue))
.traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("IncludePartitionValue").build())
.build();
private static final SdkField PARTITION_INCLUDE_SCHEMA_TABLE_FIELD = SdkField
. builder(MarshallingType.BOOLEAN)
.getter(getter(KafkaSettings::partitionIncludeSchemaTable))
.setter(setter(Builder::partitionIncludeSchemaTable))
.traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("PartitionIncludeSchemaTable")
.build()).build();
private static final SdkField INCLUDE_TABLE_ALTER_OPERATIONS_FIELD = SdkField
. builder(MarshallingType.BOOLEAN)
.getter(getter(KafkaSettings::includeTableAlterOperations))
.setter(setter(Builder::includeTableAlterOperations))
.traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("IncludeTableAlterOperations")
.build()).build();
private static final SdkField INCLUDE_CONTROL_DETAILS_FIELD = SdkField. builder(MarshallingType.BOOLEAN)
.getter(getter(KafkaSettings::includeControlDetails)).setter(setter(Builder::includeControlDetails))
.traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("IncludeControlDetails").build())
.build();
private static final SdkField MESSAGE_MAX_BYTES_FIELD = SdkField. builder(MarshallingType.INTEGER)
.getter(getter(KafkaSettings::messageMaxBytes)).setter(setter(Builder::messageMaxBytes))
.traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("MessageMaxBytes").build()).build();
private static final SdkField INCLUDE_NULL_AND_EMPTY_FIELD = SdkField. builder(MarshallingType.BOOLEAN)
.getter(getter(KafkaSettings::includeNullAndEmpty)).setter(setter(Builder::includeNullAndEmpty))
.traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("IncludeNullAndEmpty").build())
.build();
private static final List> SDK_FIELDS = Collections.unmodifiableList(Arrays.asList(BROKER_FIELD, TOPIC_FIELD,
MESSAGE_FORMAT_FIELD, INCLUDE_TRANSACTION_DETAILS_FIELD, INCLUDE_PARTITION_VALUE_FIELD,
PARTITION_INCLUDE_SCHEMA_TABLE_FIELD, INCLUDE_TABLE_ALTER_OPERATIONS_FIELD, INCLUDE_CONTROL_DETAILS_FIELD,
MESSAGE_MAX_BYTES_FIELD, INCLUDE_NULL_AND_EMPTY_FIELD));
private static final long serialVersionUID = 1L;
private final String broker;
private final String topic;
private final String messageFormat;
private final Boolean includeTransactionDetails;
private final Boolean includePartitionValue;
private final Boolean partitionIncludeSchemaTable;
private final Boolean includeTableAlterOperations;
private final Boolean includeControlDetails;
private final Integer messageMaxBytes;
private final Boolean includeNullAndEmpty;
/**
 * Copies every field value captured by the builder into this immutable instance.
 *
 * @param b the builder whose current state is snapshotted
 */
private KafkaSettings(BuilderImpl b) {
    this.broker = b.broker;
    this.topic = b.topic;
    this.messageFormat = b.messageFormat;
    this.includeTransactionDetails = b.includeTransactionDetails;
    this.includePartitionValue = b.includePartitionValue;
    this.partitionIncludeSchemaTable = b.partitionIncludeSchemaTable;
    this.includeTableAlterOperations = b.includeTableAlterOperations;
    this.includeControlDetails = b.includeControlDetails;
    this.messageMaxBytes = b.messageMaxBytes;
    this.includeNullAndEmpty = b.includeNullAndEmpty;
}
/**
 * <p>
 * The broker location and port of the Kafka broker that hosts your Kafka instance. Specify the broker in the form
 * {@code broker-hostname-or-ip:port}. For example, {@code "ec2-12-345-678-901.compute-1.amazonaws.com:2345"}.
 * </p>
 *
 * @return The broker location and port of the Kafka broker that hosts your Kafka instance, in the form
 *         {@code broker-hostname-or-ip:port}; may be {@code null} if not set.
 */
public String broker() {
return broker;
}
/**
 * <p>
 * The topic to which you migrate the data. If you don't specify a topic, AWS DMS specifies
 * {@code "kafka-default-topic"} as the migration topic.
 * </p>
 *
 * @return The migration topic, or {@code null} if not set (in which case AWS DMS uses
 *         {@code "kafka-default-topic"}).
 */
public String topic() {
return topic;
}
/**
 * <p>
 * The output format for the records created on the endpoint. The message format is {@code JSON} (default) or
 * {@code JSON_UNFORMATTED} (a single line with no tab).
 * </p>
 * <p>
 * If the service returns an enum value that is not available in the current SDK version, {@link #messageFormat}
 * will return {@link MessageFormatValue#UNKNOWN_TO_SDK_VERSION}. The raw value returned by the service is available
 * from {@link #messageFormatAsString}.
 * </p>
 *
 * @return The output format for the records created on the endpoint, as a {@link MessageFormatValue} enum.
 * @see MessageFormatValue
 */
public MessageFormatValue messageFormat() {
return MessageFormatValue.fromValue(messageFormat);
}
/**
 * <p>
 * The output format for the records created on the endpoint. The message format is {@code JSON} (default) or
 * {@code JSON_UNFORMATTED} (a single line with no tab).
 * </p>
 * <p>
 * Unlike {@link #messageFormat}, this accessor returns the raw string received from the service, so values unknown
 * to this SDK version are preserved verbatim.
 * </p>
 *
 * @return The output format for the records created on the endpoint, as the raw service string.
 * @see MessageFormatValue
 */
public String messageFormatAsString() {
return messageFormat;
}
/**
 * <p>
 * Provides detailed transaction information from the source database. This information includes a commit timestamp,
 * a log position, and values for {@code transaction_id}, previous {@code transaction_id}, and
 * {@code transaction_record_id} (the record offset within a transaction). The default is {@code false}.
 * </p>
 *
 * @return Whether detailed transaction information is included; {@code null} means not set (service default
 *         {@code false}).
 */
public Boolean includeTransactionDetails() {
return includeTransactionDetails;
}
/**
 * <p>
 * Shows the partition value within the Kafka message output, unless the partition type is
 * {@code schema-table-type}. The default is {@code false}.
 * </p>
 *
 * @return Whether the partition value is shown in the message output; {@code null} means not set (service default
 *         {@code false}).
 */
public Boolean includePartitionValue() {
return includePartitionValue;
}
/**
 * <p>
 * Prefixes schema and table names to partition values, when the partition type is {@code primary-key-type}.
 * Doing this increases data distribution among Kafka partitions. For example, suppose that a SysBench schema has
 * thousands of tables and each table has only limited range for a primary key. In this case, the same primary key
 * is sent from thousands of tables to the same partition, which causes throttling. The default is {@code false}.
 * </p>
 *
 * @return Whether schema and table names are prefixed to partition values; {@code null} means not set (service
 *         default {@code false}).
 */
public Boolean partitionIncludeSchemaTable() {
return partitionIncludeSchemaTable;
}
/**
 * <p>
 * Includes any data definition language (DDL) operations that change the table in the control data, such as
 * {@code rename-table}, {@code drop-table}, {@code add-column}, {@code drop-column}, and
 * {@code rename-column}. The default is {@code false}.
 * </p>
 *
 * @return Whether table-altering DDL operations are included in the control data; {@code null} means not set
 *         (service default {@code false}).
 */
public Boolean includeTableAlterOperations() {
return includeTableAlterOperations;
}
/**
 * <p>
 * Shows detailed control information for table definition, column definition, and table and column changes in the
 * Kafka message output. The default is {@code false}.
 * </p>
 *
 * @return Whether detailed control information is shown in the message output; {@code null} means not set (service
 *         default {@code false}).
 */
public Boolean includeControlDetails() {
return includeControlDetails;
}
/**
 * <p>
 * The maximum size in bytes for records created on the endpoint. The default is 1,000,000.
 * </p>
 *
 * @return The maximum record size in bytes; {@code null} means not set (service default 1,000,000).
 */
public Integer messageMaxBytes() {
return messageMaxBytes;
}
/**
 * <p>
 * Include NULL and empty columns for records migrated to the endpoint. The default is {@code false}.
 * </p>
 *
 * @return Whether NULL and empty columns are included; {@code null} means not set (service default {@code false}).
 */
public Boolean includeNullAndEmpty() {
return includeNullAndEmpty;
}
/**
 * Creates a mutable {@link Builder} pre-populated with this object's current field values.
 *
 * @return a builder initialized from this instance
 */
@Override
public Builder toBuilder() {
return new BuilderImpl(this);
}
/**
 * Creates a new, empty {@link Builder} for constructing a {@code KafkaSettings} instance.
 *
 * @return a fresh builder with no fields set
 */
public static Builder builder() {
return new BuilderImpl();
}
public static Class extends Builder> serializableBuilderClass() {
return BuilderImpl.class;
}
/**
 * Computes a hash code over all SDK fields. {@link Objects#hash} starts its
 * accumulator at 1 and folds each element as {@code 31 * acc + hash(e)}, which is
 * exactly the unrolled accumulation the original code performed, so the resulting
 * value is identical.
 */
@Override
public int hashCode() {
    return Objects.hash(broker(), topic(), messageFormatAsString(), includeTransactionDetails(),
            includePartitionValue(), partitionIncludeSchemaTable(), includeTableAlterOperations(),
            includeControlDetails(), messageMaxBytes(), includeNullAndEmpty());
}
/**
 * Delegates to {@link #equalsBySdkFields(Object)}: two {@code KafkaSettings} objects are equal
 * exactly when all of their SDK fields are equal.
 */
@Override
public boolean equals(Object obj) {
return equalsBySdkFields(obj);
}
/**
 * Field-by-field equality over every SDK field. A {@code null} argument is rejected by the
 * {@code instanceof} test (null is never an instance of anything), so no separate null check
 * is required.
 */
@Override
public boolean equalsBySdkFields(Object obj) {
    if (this == obj) {
        return true;
    }
    if (!(obj instanceof KafkaSettings)) {
        return false;
    }
    KafkaSettings that = (KafkaSettings) obj;
    boolean same = Objects.equals(broker(), that.broker());
    same = same && Objects.equals(topic(), that.topic());
    same = same && Objects.equals(messageFormatAsString(), that.messageFormatAsString());
    same = same && Objects.equals(includeTransactionDetails(), that.includeTransactionDetails());
    same = same && Objects.equals(includePartitionValue(), that.includePartitionValue());
    same = same && Objects.equals(partitionIncludeSchemaTable(), that.partitionIncludeSchemaTable());
    same = same && Objects.equals(includeTableAlterOperations(), that.includeTableAlterOperations());
    same = same && Objects.equals(includeControlDetails(), that.includeControlDetails());
    same = same && Objects.equals(messageMaxBytes(), that.messageMaxBytes());
    same = same && Objects.equals(includeNullAndEmpty(), that.includeNullAndEmpty());
    return same;
}
/**
 * Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be
 * redacted from this string using a placeholder value.
 * <p>
 * Fields appear in declaration order; the exact format is produced by the SDK's {@link ToString} helper
 * and should not be parsed programmatically.
 */
@Override
public String toString() {
return ToString.builder("KafkaSettings").add("Broker", broker()).add("Topic", topic())
.add("MessageFormat", messageFormatAsString()).add("IncludeTransactionDetails", includeTransactionDetails())
.add("IncludePartitionValue", includePartitionValue())
.add("PartitionIncludeSchemaTable", partitionIncludeSchemaTable())
.add("IncludeTableAlterOperations", includeTableAlterOperations())
.add("IncludeControlDetails", includeControlDetails()).add("MessageMaxBytes", messageMaxBytes())
.add("IncludeNullAndEmpty", includeNullAndEmpty()).build();
}
/**
 * Looks up the current value of the field with the given marshalling name, cast to the requested type.
 * NOTE(review): restored the stripped generic signature ({@code <T>}, {@code Class<T>},
 * {@code Optional<T>}) — the extracted source used raw types, which does not match the SDK
 * contract and raises unchecked warnings.
 *
 * @param fieldName the marshalling location name of the field (e.g. {@code "Broker"})
 * @param clazz the expected type of the field's value
 * @param <T> the requested value type
 * @return the field value wrapped in an {@link Optional}, empty if the field is unset or the name is unknown
 * @throws ClassCastException if the field's value is not assignable to {@code clazz}
 */
public final <T> Optional<T> getValueForField(String fieldName, Class<T> clazz) {
    switch (fieldName) {
    case "Broker":
        return Optional.ofNullable(clazz.cast(broker()));
    case "Topic":
        return Optional.ofNullable(clazz.cast(topic()));
    case "MessageFormat":
        return Optional.ofNullable(clazz.cast(messageFormatAsString()));
    case "IncludeTransactionDetails":
        return Optional.ofNullable(clazz.cast(includeTransactionDetails()));
    case "IncludePartitionValue":
        return Optional.ofNullable(clazz.cast(includePartitionValue()));
    case "PartitionIncludeSchemaTable":
        return Optional.ofNullable(clazz.cast(partitionIncludeSchemaTable()));
    case "IncludeTableAlterOperations":
        return Optional.ofNullable(clazz.cast(includeTableAlterOperations()));
    case "IncludeControlDetails":
        return Optional.ofNullable(clazz.cast(includeControlDetails()));
    case "MessageMaxBytes":
        return Optional.ofNullable(clazz.cast(messageMaxBytes()));
    case "IncludeNullAndEmpty":
        return Optional.ofNullable(clazz.cast(includeNullAndEmpty()));
    default:
        return Optional.empty();
    }
}
@Override
public List> sdkFields() {
return SDK_FIELDS;
}
private static Function