All downloads are free. The search and download functionality uses the official Maven repository.

software.amazon.awssdk.services.databasemigration.model.KafkaSettings Maven / Gradle / Ivy

Go to download

The AWS Java SDK for AWS Database Migration Service module holds the client classes that are used for communicating with AWS Database Migration Service.

There is a newer version: 2.30.1
Show newest version
/*
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * 
 * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
 * the License. A copy of the License is located at
 * 
 * http://aws.amazon.com/apache2.0
 * 
 * or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
 * and limitations under the License.
 */

package software.amazon.awssdk.services.databasemigration.model;

import java.io.Serializable;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
import java.util.function.BiConsumer;
import java.util.function.Function;
import software.amazon.awssdk.annotations.Generated;
import software.amazon.awssdk.core.SdkField;
import software.amazon.awssdk.core.SdkPojo;
import software.amazon.awssdk.core.protocol.MarshallLocation;
import software.amazon.awssdk.core.protocol.MarshallingType;
import software.amazon.awssdk.core.traits.LocationTrait;
import software.amazon.awssdk.utils.ToString;
import software.amazon.awssdk.utils.builder.CopyableBuilder;
import software.amazon.awssdk.utils.builder.ToCopyableBuilder;

/**
 * 

* Provides information that describes an Apache Kafka endpoint. This information includes the output format of records * applied to the endpoint and details of transaction and control table data information. *

*/ @Generated("software.amazon.awssdk:codegen") public final class KafkaSettings implements SdkPojo, Serializable, ToCopyableBuilder { private static final SdkField BROKER_FIELD = SdkField. builder(MarshallingType.STRING) .getter(getter(KafkaSettings::broker)).setter(setter(Builder::broker)) .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("Broker").build()).build(); private static final SdkField TOPIC_FIELD = SdkField. builder(MarshallingType.STRING) .getter(getter(KafkaSettings::topic)).setter(setter(Builder::topic)) .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("Topic").build()).build(); private static final SdkField MESSAGE_FORMAT_FIELD = SdkField. builder(MarshallingType.STRING) .getter(getter(KafkaSettings::messageFormatAsString)).setter(setter(Builder::messageFormat)) .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("MessageFormat").build()).build(); private static final SdkField INCLUDE_TRANSACTION_DETAILS_FIELD = SdkField . builder(MarshallingType.BOOLEAN).getter(getter(KafkaSettings::includeTransactionDetails)) .setter(setter(Builder::includeTransactionDetails)) .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("IncludeTransactionDetails").build()) .build(); private static final SdkField INCLUDE_PARTITION_VALUE_FIELD = SdkField. builder(MarshallingType.BOOLEAN) .getter(getter(KafkaSettings::includePartitionValue)).setter(setter(Builder::includePartitionValue)) .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("IncludePartitionValue").build()) .build(); private static final SdkField PARTITION_INCLUDE_SCHEMA_TABLE_FIELD = SdkField . 
builder(MarshallingType.BOOLEAN) .getter(getter(KafkaSettings::partitionIncludeSchemaTable)) .setter(setter(Builder::partitionIncludeSchemaTable)) .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("PartitionIncludeSchemaTable") .build()).build(); private static final SdkField INCLUDE_TABLE_ALTER_OPERATIONS_FIELD = SdkField . builder(MarshallingType.BOOLEAN) .getter(getter(KafkaSettings::includeTableAlterOperations)) .setter(setter(Builder::includeTableAlterOperations)) .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("IncludeTableAlterOperations") .build()).build(); private static final SdkField INCLUDE_CONTROL_DETAILS_FIELD = SdkField. builder(MarshallingType.BOOLEAN) .getter(getter(KafkaSettings::includeControlDetails)).setter(setter(Builder::includeControlDetails)) .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("IncludeControlDetails").build()) .build(); private static final SdkField MESSAGE_MAX_BYTES_FIELD = SdkField. builder(MarshallingType.INTEGER) .getter(getter(KafkaSettings::messageMaxBytes)).setter(setter(Builder::messageMaxBytes)) .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("MessageMaxBytes").build()).build(); private static final SdkField INCLUDE_NULL_AND_EMPTY_FIELD = SdkField. 
builder(MarshallingType.BOOLEAN) .getter(getter(KafkaSettings::includeNullAndEmpty)).setter(setter(Builder::includeNullAndEmpty)) .traits(LocationTrait.builder().location(MarshallLocation.PAYLOAD).locationName("IncludeNullAndEmpty").build()) .build(); private static final List> SDK_FIELDS = Collections.unmodifiableList(Arrays.asList(BROKER_FIELD, TOPIC_FIELD, MESSAGE_FORMAT_FIELD, INCLUDE_TRANSACTION_DETAILS_FIELD, INCLUDE_PARTITION_VALUE_FIELD, PARTITION_INCLUDE_SCHEMA_TABLE_FIELD, INCLUDE_TABLE_ALTER_OPERATIONS_FIELD, INCLUDE_CONTROL_DETAILS_FIELD, MESSAGE_MAX_BYTES_FIELD, INCLUDE_NULL_AND_EMPTY_FIELD)); private static final long serialVersionUID = 1L; private final String broker; private final String topic; private final String messageFormat; private final Boolean includeTransactionDetails; private final Boolean includePartitionValue; private final Boolean partitionIncludeSchemaTable; private final Boolean includeTableAlterOperations; private final Boolean includeControlDetails; private final Integer messageMaxBytes; private final Boolean includeNullAndEmpty; private KafkaSettings(BuilderImpl builder) { this.broker = builder.broker; this.topic = builder.topic; this.messageFormat = builder.messageFormat; this.includeTransactionDetails = builder.includeTransactionDetails; this.includePartitionValue = builder.includePartitionValue; this.partitionIncludeSchemaTable = builder.partitionIncludeSchemaTable; this.includeTableAlterOperations = builder.includeTableAlterOperations; this.includeControlDetails = builder.includeControlDetails; this.messageMaxBytes = builder.messageMaxBytes; this.includeNullAndEmpty = builder.includeNullAndEmpty; } /** *

* The broker location and port of the Kafka broker that hosts your Kafka instance. Specify the broker in the form * broker-hostname-or-ip:port . For example, * "ec2-12-345-678-901.compute-1.amazonaws.com:2345". *

* * @return The broker location and port of the Kafka broker that hosts your Kafka instance. Specify the broker in * the form broker-hostname-or-ip:port . For example, * "ec2-12-345-678-901.compute-1.amazonaws.com:2345". */ public String broker() { return broker; } /** *

* The topic to which you migrate the data. If you don't specify a topic, AWS DMS specifies * "kafka-default-topic" as the migration topic. *

* * @return The topic to which you migrate the data. If you don't specify a topic, AWS DMS specifies * "kafka-default-topic" as the migration topic. */ public String topic() { return topic; } /** *

* The output format for the records created on the endpoint. The message format is JSON (default) or * JSON_UNFORMATTED (a single line with no tab). *

*

* If the service returns an enum value that is not available in the current SDK version, {@link #messageFormat} * will return {@link MessageFormatValue#UNKNOWN_TO_SDK_VERSION}. The raw value returned by the service is available * from {@link #messageFormatAsString}. *

* * @return The output format for the records created on the endpoint. The message format is JSON * (default) or JSON_UNFORMATTED (a single line with no tab). * @see MessageFormatValue */ public MessageFormatValue messageFormat() { return MessageFormatValue.fromValue(messageFormat); } /** *

* The output format for the records created on the endpoint. The message format is JSON (default) or * JSON_UNFORMATTED (a single line with no tab). *

*

* If the service returns an enum value that is not available in the current SDK version, {@link #messageFormat} * will return {@link MessageFormatValue#UNKNOWN_TO_SDK_VERSION}. The raw value returned by the service is available * from {@link #messageFormatAsString}. *

* * @return The output format for the records created on the endpoint. The message format is JSON * (default) or JSON_UNFORMATTED (a single line with no tab). * @see MessageFormatValue */ public String messageFormatAsString() { return messageFormat; } /** *

* Provides detailed transaction information from the source database. This information includes a commit timestamp, * a log position, and values for transaction_id, previous transaction_id, and * transaction_record_id (the record offset within a transaction). The default is false. *

* * @return Provides detailed transaction information from the source database. This information includes a commit * timestamp, a log position, and values for transaction_id, previous * transaction_id, and transaction_record_id (the record offset within a * transaction). The default is false. */ public Boolean includeTransactionDetails() { return includeTransactionDetails; } /** *

* Shows the partition value within the Kafka message output, unless the partition type is * schema-table-type. The default is false. *

* * @return Shows the partition value within the Kafka message output, unless the partition type is * schema-table-type. The default is false. */ public Boolean includePartitionValue() { return includePartitionValue; } /** *

* Prefixes schema and table names to partition values, when the partition type is primary-key-type. * Doing this increases data distribution among Kafka partitions. For example, suppose that a SysBench schema has * thousands of tables and each table has only limited range for a primary key. In this case, the same primary key * is sent from thousands of tables to the same partition, which causes throttling. The default is * false. *

* * @return Prefixes schema and table names to partition values, when the partition type is * primary-key-type. Doing this increases data distribution among Kafka partitions. For * example, suppose that a SysBench schema has thousands of tables and each table has only limited range for * a primary key. In this case, the same primary key is sent from thousands of tables to the same partition, * which causes throttling. The default is false. */ public Boolean partitionIncludeSchemaTable() { return partitionIncludeSchemaTable; } /** *

* Includes any data definition language (DDL) operations that change the table in the control data, such as * rename-table, drop-table, add-column, drop-column, and * rename-column. The default is false. *

* * @return Includes any data definition language (DDL) operations that change the table in the control data, such as * rename-table, drop-table, add-column, drop-column, * and rename-column. The default is false. */ public Boolean includeTableAlterOperations() { return includeTableAlterOperations; } /** *

* Shows detailed control information for table definition, column definition, and table and column changes in the * Kafka message output. The default is false. *

* * @return Shows detailed control information for table definition, column definition, and table and column changes * in the Kafka message output. The default is false. */ public Boolean includeControlDetails() { return includeControlDetails; } /** *

* The maximum size in bytes for records created on the endpoint The default is 1,000,000. *

* * @return The maximum size in bytes for records created on the endpoint The default is 1,000,000. */ public Integer messageMaxBytes() { return messageMaxBytes; } /** *

* Include NULL and empty columns for records migrated to the endpoint. The default is false. *

* * @return Include NULL and empty columns for records migrated to the endpoint. The default is false. */ public Boolean includeNullAndEmpty() { return includeNullAndEmpty; } @Override public Builder toBuilder() { return new BuilderImpl(this); } public static Builder builder() { return new BuilderImpl(); } public static Class serializableBuilderClass() { return BuilderImpl.class; } @Override public int hashCode() { int hashCode = 1; hashCode = 31 * hashCode + Objects.hashCode(broker()); hashCode = 31 * hashCode + Objects.hashCode(topic()); hashCode = 31 * hashCode + Objects.hashCode(messageFormatAsString()); hashCode = 31 * hashCode + Objects.hashCode(includeTransactionDetails()); hashCode = 31 * hashCode + Objects.hashCode(includePartitionValue()); hashCode = 31 * hashCode + Objects.hashCode(partitionIncludeSchemaTable()); hashCode = 31 * hashCode + Objects.hashCode(includeTableAlterOperations()); hashCode = 31 * hashCode + Objects.hashCode(includeControlDetails()); hashCode = 31 * hashCode + Objects.hashCode(messageMaxBytes()); hashCode = 31 * hashCode + Objects.hashCode(includeNullAndEmpty()); return hashCode; } @Override public boolean equals(Object obj) { return equalsBySdkFields(obj); } @Override public boolean equalsBySdkFields(Object obj) { if (this == obj) { return true; } if (obj == null) { return false; } if (!(obj instanceof KafkaSettings)) { return false; } KafkaSettings other = (KafkaSettings) obj; return Objects.equals(broker(), other.broker()) && Objects.equals(topic(), other.topic()) && Objects.equals(messageFormatAsString(), other.messageFormatAsString()) && Objects.equals(includeTransactionDetails(), other.includeTransactionDetails()) && Objects.equals(includePartitionValue(), other.includePartitionValue()) && Objects.equals(partitionIncludeSchemaTable(), other.partitionIncludeSchemaTable()) && Objects.equals(includeTableAlterOperations(), other.includeTableAlterOperations()) && Objects.equals(includeControlDetails(), 
other.includeControlDetails()) && Objects.equals(messageMaxBytes(), other.messageMaxBytes()) && Objects.equals(includeNullAndEmpty(), other.includeNullAndEmpty()); } /** * Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be * redacted from this string using a placeholder value. */ @Override public String toString() { return ToString.builder("KafkaSettings").add("Broker", broker()).add("Topic", topic()) .add("MessageFormat", messageFormatAsString()).add("IncludeTransactionDetails", includeTransactionDetails()) .add("IncludePartitionValue", includePartitionValue()) .add("PartitionIncludeSchemaTable", partitionIncludeSchemaTable()) .add("IncludeTableAlterOperations", includeTableAlterOperations()) .add("IncludeControlDetails", includeControlDetails()).add("MessageMaxBytes", messageMaxBytes()) .add("IncludeNullAndEmpty", includeNullAndEmpty()).build(); } public Optional getValueForField(String fieldName, Class clazz) { switch (fieldName) { case "Broker": return Optional.ofNullable(clazz.cast(broker())); case "Topic": return Optional.ofNullable(clazz.cast(topic())); case "MessageFormat": return Optional.ofNullable(clazz.cast(messageFormatAsString())); case "IncludeTransactionDetails": return Optional.ofNullable(clazz.cast(includeTransactionDetails())); case "IncludePartitionValue": return Optional.ofNullable(clazz.cast(includePartitionValue())); case "PartitionIncludeSchemaTable": return Optional.ofNullable(clazz.cast(partitionIncludeSchemaTable())); case "IncludeTableAlterOperations": return Optional.ofNullable(clazz.cast(includeTableAlterOperations())); case "IncludeControlDetails": return Optional.ofNullable(clazz.cast(includeControlDetails())); case "MessageMaxBytes": return Optional.ofNullable(clazz.cast(messageMaxBytes())); case "IncludeNullAndEmpty": return Optional.ofNullable(clazz.cast(includeNullAndEmpty())); default: return Optional.empty(); } } @Override public List> sdkFields() { return 
SDK_FIELDS; } private static Function getter(Function g) { return obj -> g.apply((KafkaSettings) obj); } private static BiConsumer setter(BiConsumer s) { return (obj, val) -> s.accept((Builder) obj, val); } public interface Builder extends SdkPojo, CopyableBuilder { /** *

* The broker location and port of the Kafka broker that hosts your Kafka instance. Specify the broker in the * form broker-hostname-or-ip:port . For example, * "ec2-12-345-678-901.compute-1.amazonaws.com:2345". *

* * @param broker * The broker location and port of the Kafka broker that hosts your Kafka instance. Specify the broker in * the form broker-hostname-or-ip:port . For example, * "ec2-12-345-678-901.compute-1.amazonaws.com:2345". * @return Returns a reference to this object so that method calls can be chained together. */ Builder broker(String broker); /** *

* The topic to which you migrate the data. If you don't specify a topic, AWS DMS specifies * "kafka-default-topic" as the migration topic. *

* * @param topic * The topic to which you migrate the data. If you don't specify a topic, AWS DMS specifies * "kafka-default-topic" as the migration topic. * @return Returns a reference to this object so that method calls can be chained together. */ Builder topic(String topic); /** *

* The output format for the records created on the endpoint. The message format is JSON (default) * or JSON_UNFORMATTED (a single line with no tab). *

* * @param messageFormat * The output format for the records created on the endpoint. The message format is JSON * (default) or JSON_UNFORMATTED (a single line with no tab). * @see MessageFormatValue * @return Returns a reference to this object so that method calls can be chained together. * @see MessageFormatValue */ Builder messageFormat(String messageFormat); /** *

* The output format for the records created on the endpoint. The message format is JSON (default) * or JSON_UNFORMATTED (a single line with no tab). *

* * @param messageFormat * The output format for the records created on the endpoint. The message format is JSON * (default) or JSON_UNFORMATTED (a single line with no tab). * @see MessageFormatValue * @return Returns a reference to this object so that method calls can be chained together. * @see MessageFormatValue */ Builder messageFormat(MessageFormatValue messageFormat); /** *

* Provides detailed transaction information from the source database. This information includes a commit * timestamp, a log position, and values for transaction_id, previous transaction_id, * and transaction_record_id (the record offset within a transaction). The default is * false. *

* * @param includeTransactionDetails * Provides detailed transaction information from the source database. This information includes a commit * timestamp, a log position, and values for transaction_id, previous * transaction_id, and transaction_record_id (the record offset within a * transaction). The default is false. * @return Returns a reference to this object so that method calls can be chained together. */ Builder includeTransactionDetails(Boolean includeTransactionDetails); /** *

* Shows the partition value within the Kafka message output, unless the partition type is * schema-table-type. The default is false. *

* * @param includePartitionValue * Shows the partition value within the Kafka message output, unless the partition type is * schema-table-type. The default is false. * @return Returns a reference to this object so that method calls can be chained together. */ Builder includePartitionValue(Boolean includePartitionValue); /** *

* Prefixes schema and table names to partition values, when the partition type is primary-key-type * . Doing this increases data distribution among Kafka partitions. For example, suppose that a SysBench schema * has thousands of tables and each table has only limited range for a primary key. In this case, the same * primary key is sent from thousands of tables to the same partition, which causes throttling. The default is * false. *

* * @param partitionIncludeSchemaTable * Prefixes schema and table names to partition values, when the partition type is * primary-key-type. Doing this increases data distribution among Kafka partitions. For * example, suppose that a SysBench schema has thousands of tables and each table has only limited range * for a primary key. In this case, the same primary key is sent from thousands of tables to the same * partition, which causes throttling. The default is false. * @return Returns a reference to this object so that method calls can be chained together. */ Builder partitionIncludeSchemaTable(Boolean partitionIncludeSchemaTable); /** *

* Includes any data definition language (DDL) operations that change the table in the control data, such as * rename-table, drop-table, add-column, drop-column, and * rename-column. The default is false. *

* * @param includeTableAlterOperations * Includes any data definition language (DDL) operations that change the table in the control data, such * as rename-table, drop-table, add-column, * drop-column, and rename-column. The default is false. * @return Returns a reference to this object so that method calls can be chained together. */ Builder includeTableAlterOperations(Boolean includeTableAlterOperations); /** *

* Shows detailed control information for table definition, column definition, and table and column changes in * the Kafka message output. The default is false. *

* * @param includeControlDetails * Shows detailed control information for table definition, column definition, and table and column * changes in the Kafka message output. The default is false. * @return Returns a reference to this object so that method calls can be chained together. */ Builder includeControlDetails(Boolean includeControlDetails); /** *

* The maximum size in bytes for records created on the endpoint The default is 1,000,000. *

* * @param messageMaxBytes * The maximum size in bytes for records created on the endpoint The default is 1,000,000. * @return Returns a reference to this object so that method calls can be chained together. */ Builder messageMaxBytes(Integer messageMaxBytes); /** *

* Include NULL and empty columns for records migrated to the endpoint. The default is false. *

* * @param includeNullAndEmpty * Include NULL and empty columns for records migrated to the endpoint. The default is false * . * @return Returns a reference to this object so that method calls can be chained together. */ Builder includeNullAndEmpty(Boolean includeNullAndEmpty); } static final class BuilderImpl implements Builder { private String broker; private String topic; private String messageFormat; private Boolean includeTransactionDetails; private Boolean includePartitionValue; private Boolean partitionIncludeSchemaTable; private Boolean includeTableAlterOperations; private Boolean includeControlDetails; private Integer messageMaxBytes; private Boolean includeNullAndEmpty; private BuilderImpl() { } private BuilderImpl(KafkaSettings model) { broker(model.broker); topic(model.topic); messageFormat(model.messageFormat); includeTransactionDetails(model.includeTransactionDetails); includePartitionValue(model.includePartitionValue); partitionIncludeSchemaTable(model.partitionIncludeSchemaTable); includeTableAlterOperations(model.includeTableAlterOperations); includeControlDetails(model.includeControlDetails); messageMaxBytes(model.messageMaxBytes); includeNullAndEmpty(model.includeNullAndEmpty); } public final String getBroker() { return broker; } @Override public final Builder broker(String broker) { this.broker = broker; return this; } public final void setBroker(String broker) { this.broker = broker; } public final String getTopic() { return topic; } @Override public final Builder topic(String topic) { this.topic = topic; return this; } public final void setTopic(String topic) { this.topic = topic; } public final String getMessageFormat() { return messageFormat; } @Override public final Builder messageFormat(String messageFormat) { this.messageFormat = messageFormat; return this; } @Override public final Builder messageFormat(MessageFormatValue messageFormat) { this.messageFormat(messageFormat == null ? 
null : messageFormat.toString()); return this; } public final void setMessageFormat(String messageFormat) { this.messageFormat = messageFormat; } public final Boolean getIncludeTransactionDetails() { return includeTransactionDetails; } @Override public final Builder includeTransactionDetails(Boolean includeTransactionDetails) { this.includeTransactionDetails = includeTransactionDetails; return this; } public final void setIncludeTransactionDetails(Boolean includeTransactionDetails) { this.includeTransactionDetails = includeTransactionDetails; } public final Boolean getIncludePartitionValue() { return includePartitionValue; } @Override public final Builder includePartitionValue(Boolean includePartitionValue) { this.includePartitionValue = includePartitionValue; return this; } public final void setIncludePartitionValue(Boolean includePartitionValue) { this.includePartitionValue = includePartitionValue; } public final Boolean getPartitionIncludeSchemaTable() { return partitionIncludeSchemaTable; } @Override public final Builder partitionIncludeSchemaTable(Boolean partitionIncludeSchemaTable) { this.partitionIncludeSchemaTable = partitionIncludeSchemaTable; return this; } public final void setPartitionIncludeSchemaTable(Boolean partitionIncludeSchemaTable) { this.partitionIncludeSchemaTable = partitionIncludeSchemaTable; } public final Boolean getIncludeTableAlterOperations() { return includeTableAlterOperations; } @Override public final Builder includeTableAlterOperations(Boolean includeTableAlterOperations) { this.includeTableAlterOperations = includeTableAlterOperations; return this; } public final void setIncludeTableAlterOperations(Boolean includeTableAlterOperations) { this.includeTableAlterOperations = includeTableAlterOperations; } public final Boolean getIncludeControlDetails() { return includeControlDetails; } @Override public final Builder includeControlDetails(Boolean includeControlDetails) { this.includeControlDetails = includeControlDetails; return this; 
} public final void setIncludeControlDetails(Boolean includeControlDetails) { this.includeControlDetails = includeControlDetails; } public final Integer getMessageMaxBytes() { return messageMaxBytes; } @Override public final Builder messageMaxBytes(Integer messageMaxBytes) { this.messageMaxBytes = messageMaxBytes; return this; } public final void setMessageMaxBytes(Integer messageMaxBytes) { this.messageMaxBytes = messageMaxBytes; } public final Boolean getIncludeNullAndEmpty() { return includeNullAndEmpty; } @Override public final Builder includeNullAndEmpty(Boolean includeNullAndEmpty) { this.includeNullAndEmpty = includeNullAndEmpty; return this; } public final void setIncludeNullAndEmpty(Boolean includeNullAndEmpty) { this.includeNullAndEmpty = includeNullAndEmpty; } @Override public KafkaSettings build() { return new KafkaSettings(this); } @Override public List> sdkFields() { return SDK_FIELDS; } } }




© 2015 - 2025 Weber Informatics LLC | Privacy Policy