/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.camel.component.kafka.springboot;

import java.util.Map;
import java.util.concurrent.ExecutorService;
import javax.annotation.Generated;
import org.apache.camel.component.kafka.KafkaClientFactory;
import org.apache.camel.component.kafka.KafkaComponent;
import org.apache.camel.component.kafka.KafkaConfiguration;
import org.apache.camel.component.kafka.PollExceptionStrategy;
import org.apache.camel.component.kafka.PollOnError;
import org.apache.camel.component.kafka.SeekPolicy;
import org.apache.camel.component.kafka.consumer.KafkaManualCommitFactory;
import org.apache.camel.component.kafka.serde.KafkaHeaderDeserializer;
import org.apache.camel.component.kafka.serde.KafkaHeaderSerializer;
import org.apache.camel.spi.HeaderFilterStrategy;
import org.apache.camel.spi.StateRepository;
import org.apache.camel.spring.boot.ComponentConfigurationPropertiesCommon;
import org.apache.camel.support.jsse.SSLContextParameters;
import org.springframework.boot.context.properties.ConfigurationProperties;

/**
 * Send and receive messages to/from an Apache Kafka broker.
 * 
 * Generated by camel-package-maven-plugin - do not edit this file!
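 * <p>
 * A minimal configuration sketch (illustrative only, assuming standard Spring
 * Boot relaxed binding of the fields below from application.properties; the
 * values are placeholders):
 *
 * <pre>{@code
 * camel.component.kafka.enabled=true
 * camel.component.kafka.brokers=localhost:9092
 * camel.component.kafka.group-id=my-consumer-group
 * }</pre>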
 */
@Generated("org.apache.camel.springboot.maven.SpringBootAutoConfigurationMojo")
@ConfigurationProperties(prefix = "camel.component.kafka")
public class KafkaComponentConfiguration
        extends
            ComponentConfigurationPropertiesCommon {

    /**
     * Whether to enable auto configuration of the kafka component. This is
     * enabled by default.
     */
    private Boolean enabled;
    /**
     * Sets additional properties for either the kafka consumer or the kafka
     * producer in case they can't be set directly on the Camel configurations
     * (e.g. new Kafka properties that are not reflected yet in Camel
     * configurations). The properties have to be prefixed with
     * additionalProperties., e.g.:
     * additionalProperties.transactional.id=12345&additionalProperties.schema.registry.url=http://localhost:8811/avro
     */
    private Map<String, Object> additionalProperties;
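    // Illustrative sketch only (not part of the generated file): with standard
    // Spring Boot relaxed binding, this map can be populated from
    // application.properties using bracketed keys, e.g.:
    //   camel.component.kafka.additional-properties[transactional.id]=12345
    //   camel.component.kafka.additional-properties[schema.registry.url]=http://localhost:8811/avro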
    /**
     * URL of the Kafka brokers to use. The format is host1:port1,host2:port2,
     * and the list can be a subset of brokers or a VIP pointing to a subset of
     * brokers. This option is known as bootstrap.servers in the Kafka
     * documentation.
     */
    private String brokers;
    /**
     * The client id is a user-specified string sent in each request to help
     * trace calls. It should logically identify the application making the
     * request.
     */
    private String clientId;
    /**
     * Allows pre-configuring the Kafka component with common options that the
     * endpoints will reuse. The option is a
     * org.apache.camel.component.kafka.KafkaConfiguration type.
     */
    private KafkaConfiguration configuration;
    /**
     * To use a custom HeaderFilterStrategy to filter headers to and from the
     * Camel message. The option is a org.apache.camel.spi.HeaderFilterStrategy
     * type.
     */
    private HeaderFilterStrategy headerFilterStrategy;
    /**
     * The maximum amount of time in milliseconds to wait when reconnecting to a
     * broker that has repeatedly failed to connect. If provided, the backoff
     * per host will increase exponentially for each consecutive connection
     * failure, up to this maximum. After calculating the backoff increase, 20%
     * random jitter is added to avoid connection storms.
     */
    private Integer reconnectBackoffMaxMs = 1000;
    /**
     * Timeout in milliseconds to wait gracefully for the consumer or producer
     * to shutdown and terminate its worker threads.
     */
    private Integer shutdownTimeout = 30000;
    /**
     * Whether to allow doing manual commits via KafkaManualCommit. If this
     * option is enabled then an instance of KafkaManualCommit is stored on the
     * Exchange message header, which allows end users to access this API and
     * perform manual offset commits via the Kafka consumer.
     */
    private Boolean allowManualCommit = false;
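    // Illustrative sketch only (not part of the generated file): with
    // allowManualCommit=true (and autoCommitEnable=false), a route processor
    // can commit the offset itself via the header Camel stores on the message:
    //   KafkaManualCommit manual = exchange.getMessage()
    //           .getHeader(KafkaConstants.MANUAL_COMMIT, KafkaManualCommit.class);
    //   manual.commit();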
    /**
     * If true, periodically commit to Kafka the offsets of messages already
     * fetched by the consumer. This committed offset will be used when the
     * process fails as the position from which the new consumer will begin.
     */
    private Boolean autoCommitEnable = true;
    /**
     * The frequency in ms that the consumer offsets are committed to Kafka.
     */
    private Integer autoCommitIntervalMs = 5000;
    /**
     * What to do when there is no initial offset in Kafka or if an offset is
     * out of range: earliest: automatically reset the offset to the earliest
     * offset. latest: automatically reset the offset to the latest offset.
     * fail: throw an exception to the consumer.
     */
    private String autoOffsetReset = "latest";
    /**
     * This option controls what happens when a consumer is processing an
     * exchange and it fails. If the option is false then the consumer continues
     * to the next message and processes it. If the option is true then the
     * consumer breaks out, seeks back to the offset of the message that caused
     * the failure, and then re-attempts to process this message. However, this
     * can lead to endless processing of the same message if it is bound to fail
     * every time, e.g. a poison message. Therefore it is recommended to deal
     * with that, for example, by using Camel's error handler.
     */
    private Boolean breakOnFirstError = false;
    /**
     * Allows for bridging the consumer to the Camel routing Error Handler,
     * which means any exceptions that occur while the consumer is trying to
     * pick up incoming messages, or the like, will now be processed as a
     * message and handled by the routing Error Handler. By default the consumer
     * will use the org.apache.camel.spi.ExceptionHandler to deal with
     * exceptions, which will be logged at WARN or ERROR level and ignored.
     */
    private Boolean bridgeErrorHandler = false;
    /**
     * Automatically check the CRC32 of the records consumed. This ensures no
     * on-the-wire or on-disk corruption to the messages occurred. This check
     * adds some overhead, so it may be disabled in cases seeking extreme
     * performance.
     */
    private Boolean checkCrcs = true;
    /**
     * The maximum time, in milliseconds, that the code will wait for a
     * synchronous commit to complete. The option is a java.lang.Long type.
     */
    private Long commitTimeoutMs = 5000L;
    /**
     * The configuration controls the maximum amount of time the client will
     * wait for the response of a request. If the response is not received
     * before the timeout elapses the client will resend the request if
     * necessary or fail the request if retries are exhausted.
     */
    private Integer consumerRequestTimeoutMs = 40000;
    /**
     * The number of consumers that connect to the kafka server. Each consumer
     * is run on a separate thread that retrieves and processes the incoming
     * data.
     */
    private Integer consumersCount = 1;
    /**
     * The maximum amount of data the server should return for a fetch request.
     * This is not an absolute maximum; if the first message in the first
     * non-empty partition of the fetch is larger than this value, the message
     * will still be returned to ensure that the consumer can make progress. The
     * maximum message size accepted by the broker is defined via
     * message.max.bytes (broker config) or max.message.bytes (topic config).
     * Note that the consumer performs multiple fetches in parallel.
     */
    private Integer fetchMaxBytes = 52428800;
    /**
     * The minimum amount of data the server should return for a fetch request.
     * If insufficient data is available the request will wait for that much
     * data to accumulate before answering the request.
     */
    private Integer fetchMinBytes = 1;
    /**
     * The maximum amount of time the server will block before answering the
     * fetch request if there isn't sufficient data to immediately satisfy
     * fetch.min.bytes
     */
    private Integer fetchWaitMaxMs = 500;
    /**
     * A string that uniquely identifies the group of consumer processes to
     * which this consumer belongs. By setting the same group id multiple
     * processes indicate that they are all part of the same consumer group.
     * This option is required for consumers.
     */
    private String groupId;
    /**
     * A unique identifier of the consumer instance provided by the end user.
     * Only non-empty strings are permitted. If set, the consumer is treated as
     * a static member, which means that only one instance with this ID is
     * allowed in the consumer group at any time. This can be used in
     * combination with a larger session timeout to avoid group rebalances
     * caused by transient unavailability (e.g. process restarts). If not set,
     * the consumer will join the group as a dynamic member, which is the
     * traditional behavior.
     */
    private String groupInstanceId;
    /**
     * To use a custom KafkaHeaderDeserializer to deserialize kafka header
     * values. The option is a
     * org.apache.camel.component.kafka.serde.KafkaHeaderDeserializer type.
     */
    private KafkaHeaderDeserializer headerDeserializer;
    /**
     * The expected time between heartbeats to the consumer coordinator when
     * using Kafka's group management facilities. Heartbeats are used to ensure
     * that the consumer's session stays active and to facilitate rebalancing
     * when new consumers join or leave the group. The value must be set lower
     * than session.timeout.ms, but typically should be set no higher than 1/3
     * of that value. It can be adjusted even lower to control the expected time
     * for normal rebalances.
     */
    private Integer heartbeatIntervalMs = 3000;
    /**
     * Deserializer class for key that implements the Deserializer interface.
     */
    private String keyDeserializer = "org.apache.kafka.common.serialization.StringDeserializer";
    /**
     * The maximum amount of data per-partition the server will return. The
     * maximum total memory used for a request will be #partitions *
     * max.partition.fetch.bytes. This size must be at least as large as the
     * maximum message size the server allows or else it is possible for the
     * producer to send messages larger than the consumer can fetch. If that
     * happens, the consumer can get stuck trying to fetch a large message on a
     * certain partition.
     */
    private Integer maxPartitionFetchBytes = 1048576;
    /**
     * The maximum delay between invocations of poll() when using consumer group
     * management. This places an upper bound on the amount of time that the
     * consumer can be idle before fetching more records. If poll() is not
     * called before expiration of this timeout, then the consumer is considered
     * failed and the group will rebalance in order to reassign the partitions
     * to another member. The option is a java.lang.Long type.
     */
    private Long maxPollIntervalMs;
    /**
     * The maximum number of records returned in a single call to poll()
     */
    private Integer maxPollRecords = 500;
    /**
     * The offset repository to use in order to locally store the offset of each
     * partition of the topic. Defining one will disable the autocommit. The
     * option is a org.apache.camel.spi.StateRepository type.
     */
    private StateRepository offsetRepository;
    /**
     * The class name of the partition assignment strategy that the client will
     * use to distribute partition ownership amongst consumer instances when
     * group management is used
     */
    private String partitionAssignor = "org.apache.kafka.clients.consumer.RangeAssignor";
    /**
     * What to do if kafka threw an exception while polling for new messages.
     * Will by default use the value from the component configuration unless an
     * explicit value has been configured on the endpoint level. DISCARD will
     * discard the message and continue to poll the next message. ERROR_HANDLER
     * will use Camel's error handler to process the exception, and afterwards
     * continue to poll the next message. RECONNECT will re-connect the consumer
     * and try to poll the message again. RETRY will let the consumer retry
     * polling the same message again. STOP will stop the consumer (it has to be
     * manually started/restarted if the consumer should be able to consume
     * messages again).
     */
    private PollOnError pollOnError = PollOnError.ERROR_HANDLER;
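    // Illustrative only: with standard Spring Boot relaxed binding the enum
    // can be set from properties, e.g.:
    //   camel.component.kafka.poll-on-error=RECONNECT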
    /**
     * The timeout used when polling the KafkaConsumer. The option is a
     * java.lang.Long type.
     */
    private Long pollTimeoutMs = 5000L;
    /**
     * Set whether the KafkaConsumer will read from the beginning or the end on
     * startup: SeekPolicy.BEGINNING: read from the beginning. SeekPolicy.END:
     * read from the end.
     */
    private SeekPolicy seekTo;
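    // Illustrative only: e.g. camel.component.kafka.seek-to=BEGINNING to
    // replay a topic from the start on every startup.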
    /**
     * The timeout used to detect failures when using Kafka's group management
     * facilities.
     */
    private Integer sessionTimeoutMs = 10000;
    /**
     * This enables the use of a specific Avro reader for use with the Confluent
     * Platform schema registry and the
     * io.confluent.kafka.serializers.KafkaAvroDeserializer. This option is only
     * available in the Confluent Platform (not standard Apache Kafka)
     */
    private Boolean specificAvroReader = false;
    /**
     * Whether the topic is a pattern (regular expression). This can be used to
     * subscribe to dynamic number of topics matching the pattern.
     */
    private Boolean topicIsPattern = false;
    /**
     * Deserializer class for value that implements the Deserializer interface.
     */
    private String valueDeserializer = "org.apache.kafka.common.serialization.StringDeserializer";
    /**
     * The delay in milliseconds to wait before trying again to create the
     * kafka consumer (kafka-client).
     */
    private Long createConsumerBackoffInterval = 5000L;
    /**
     * Maximum attempts to create the kafka consumer (kafka-client), before
     * eventually giving up and failing. An error during creation of the
     * consumer may be fatal due to invalid configuration and as such recovery
     * is not possible. However, one part of the validation is DNS resolution
     * of the bootstrap broker hostnames. This may be a temporary networking
     * problem, and could potentially be recoverable, while other errors are
     * fatal, such as some invalid kafka configurations. Unfortunately the
     * kafka-client does not separate these kinds of errors. Camel will by
     * default retry forever, and therefore never give up. If you want to give
     * up after many attempts, set this option and Camel will terminate the
     * consumer when giving up. You can manually restart the consumer by
     * stopping and starting the route, to try again.
     */
    private Integer createConsumerBackoffMaxAttempts;
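    // Illustrative only: combined with the default 5000 ms interval above,
    // giving up after roughly five minutes of failed creation attempts would be:
    //   camel.component.kafka.create-consumer-backoff-max-attempts=60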
    /**
     * Controls how to read messages written transactionally. If set to
     * read_committed, consumer.poll() will only return transactional messages
     * which have been committed. If set to read_uncommitted (the default),
     * consumer.poll() will return all messages, even transactional messages
     * which have been aborted. Non-transactional messages will be returned
     * unconditionally in either mode. Messages will always be returned in
     * offset order. Hence, in read_committed mode, consumer.poll() will only
     * return messages up to the last stable offset (LSO), which is the one less
     * than the offset of the first open transaction. In particular any messages
     * appearing after messages belonging to ongoing transactions will be
     * withheld until the relevant transaction has been completed. As a result,
     * read_committed consumers will not be able to read up to the high
     * watermark when there are in-flight transactions. Further, when in
     * read_committed mode, the seekToEnd method will return the LSO.
     */
    private String isolationLevel = "read_uncommitted";
    /**
     * Factory to use for creating KafkaManualCommit instances. This allows
     * plugging in a custom factory to create custom KafkaManualCommit
     * instances in case special logic is needed when doing manual commits
     * that deviates from the default implementation that comes out of the
     * box. The option is a
     * org.apache.camel.component.kafka.consumer.KafkaManualCommitFactory type.
     */
    private KafkaManualCommitFactory kafkaManualCommitFactory;
    /**
     * To use a custom strategy with the consumer to control how to handle
     * exceptions thrown from the Kafka broker while polling messages. The
     * option is a org.apache.camel.component.kafka.PollExceptionStrategy type.
     */
    private PollExceptionStrategy pollExceptionStrategy;
    /**
     * The delay in milliseconds to wait before trying again to subscribe to
     * the kafka broker.
     */
    private Long subscribeConsumerBackoffInterval = 5000L;
    /**
     * Maximum number of attempts the kafka consumer will make to subscribe to
     * the kafka broker, before eventually giving up and failing. Errors during
     * subscribing the consumer to the kafka topic could be temporary errors
     * due to network issues, and could potentially be recoverable. Camel will
     * by default retry forever, and therefore never give up. If you want to
     * give up after many attempts, set this option and Camel will terminate
     * the consumer when giving up. You can manually restart the consumer by
     * stopping and starting the route, to try again.
     */
    private Integer subscribeConsumerBackoffMaxAttempts;
    /**
     * If this feature is enabled and a single element of a batch is an Exchange
     * or Message, the producer will generate individual kafka header values for
     * it by using the batch Message to determine the values. The normal
     * behaviour consists of always using the same header values (which are
     * determined by the parent Exchange which contains the Iterable or
     * Iterator).
     */
    private Boolean batchWithIndividualHeaders = false;
    /**
     * The total bytes of memory the producer can use to buffer records waiting
     * to be sent to the server. If records are sent faster than they can be
     * delivered to the server the producer will either block or throw an
     * exception based on the preference specified by block.on.buffer.full. This
     * setting should correspond roughly to the total memory the producer will
     * use, but is not a hard bound since not all memory the producer uses is
     * used for buffering. Some additional memory will be used for compression
     * (if compression is enabled) as well as for maintaining in-flight
     * requests.
     */
    private Integer bufferMemorySize = 33554432;
    /**
     * This parameter allows you to specify the compression codec for all data
     * generated by this producer. Valid values are none, gzip, snappy, lz4 and
     * zstd.
     */
    private String compressionCodec = "none";
    /**
     * Close idle connections after the number of milliseconds specified by this
     * config.
     */
    private Integer connectionMaxIdleMs = 540000;
    /**
     * An upper bound on the time to report success or failure after a call to
     * send() returns. This limits the total time that a record will be delayed
     * prior to sending, the time to await acknowledgement from the broker (if
     * expected), and the time allowed for retriable send failures.
     */
    private Integer deliveryTimeoutMs = 120000;
    /**
     * When set to 'true', the producer will ensure that exactly one copy of
     * each message is written in the stream. If 'false', producer retries due
     * to broker failures, etc., may write duplicates of the retried message in
     * the stream. Note that enabling idempotence requires
     * max.in.flight.requests.per.connection to be less than or equal to 5 (with
     * message ordering preserved for any allowable value), retries to be
     * greater than 0, and acks must be 'all'. Idempotence is enabled by default
     * if no conflicting configurations are set. If conflicting configurations
     * are set and idempotence is not explicitly enabled, idempotence is
     * disabled. If idempotence is explicitly enabled and conflicting
     * configurations are set, a ConfigException is thrown.
     */
    private Boolean enableIdempotence = true;
    /**
     * To use a custom KafkaHeaderSerializer to serialize kafka header values.
     * The option is a
     * org.apache.camel.component.kafka.serde.KafkaHeaderSerializer type.
     */
    private KafkaHeaderSerializer headerSerializer;
    /**
     * The record key (or null if no key is specified). If this option has been
     * configured then it takes precedence over the header KafkaConstants#KEY.
     */
    private String key;
    /**
     * The serializer class for keys (defaults to the same as for messages if
     * nothing is given).
     */
    private String keySerializer = "org.apache.kafka.common.serialization.StringSerializer";
    /**
     * Whether the producer should be started lazily (on the first message). By
     * starting lazily you can use this to allow the CamelContext and routes to
     * start up in situations where a producer may otherwise fail during
     * starting and cause the route to fail being started. By deferring this
     * startup to be lazy, the startup failure can be handled during routing
     * messages via Camel's routing error handlers. Beware that when the first
     * message is processed, creating and starting the producer may take a
     * little time and prolong the total processing time.
     */
    private Boolean lazyStartProducer = false;
    /**
     * The producer groups together any records that arrive in between request
     * transmissions into a single batched request. Normally this occurs only
     * under load when records arrive faster than they can be sent out. However,
     * in some circumstances the client may want to reduce the number of
     * requests even under moderate load. This setting accomplishes this by
     * adding a small amount of artificial delay; that is, rather than
     * immediately sending out a record the producer will wait for up to the
     * given delay to allow other records to be sent so that the sends can be
     * batched together. This can be thought of as analogous to Nagle's
     * algorithm in TCP. This setting gives the upper bound on the delay for
     * batching: once we get batch.size worth of records for a partition it will
     * be sent immediately regardless of this setting; however, if we have fewer
     * than this many bytes accumulated for this partition we will 'linger' for
     * the specified time waiting for more records to show up. This setting
     * defaults to 0 (i.e. no delay). Setting linger.ms=5, for example, would
     * have the effect of reducing the number of requests sent but would add up
     * to 5ms of latency to records sent in the absence of load.
     */
    private Integer lingerMs = 0;
    /**
     * The configuration controls how long the KafkaProducer's send(),
     * partitionsFor(), initTransactions(), sendOffsetsToTransaction(),
     * commitTransaction() and abortTransaction() methods will block. For send()
     * this timeout bounds the total time waiting for both metadata fetch and
     * buffer allocation (blocking in the user-supplied serializers or
     * partitioner is not counted against this timeout). For partitionsFor()
     * this timeout bounds the time spent waiting for metadata if it is
     * unavailable. The transaction-related methods always block, but may
     * timeout if the transaction coordinator could not be discovered or did not
     * respond within the timeout.
     */
    private Integer maxBlockMs = 60000;
    /**
     * The maximum number of unacknowledged requests the client will send on a
     * single connection before blocking. Note that if this setting is set to be
     * greater than 1 and there are failed sends, there is a risk of message
     * re-ordering due to retries (i.e., if retries are enabled).
     */
    private Integer maxInFlightRequest = 5;
    /**
     * The maximum size of a request. This is also effectively a cap on the
     * maximum record size. Note that the server has its own cap on record size
     * which may be different from this. This setting will limit the number of
     * record batches the producer will send in a single request to avoid
     * sending huge requests.
     */
    private Integer maxRequestSize = 1048576;
    /**
     * The period of time in milliseconds after which we force a refresh of
     * metadata even if we haven't seen any partition leadership changes to
     * proactively discover any new brokers or partitions.
     */
    private Integer metadataMaxAgeMs = 300000;
    /**
     * A list of classes to use as metrics reporters. Implementing the
     * MetricsReporter interface allows plugging in classes that will be
     * notified of new metric creation. The JmxReporter is always included to
     * register JMX statistics.
     */
    private String metricReporters;
    /**
     * The window of time a metrics sample is computed over.
     */
    private Integer metricsSampleWindowMs = 30000;
    /**
     * The number of samples maintained to compute metrics.
     */
    private Integer noOfMetricsSample = 2;
    /**
     * The partitioner class for partitioning messages amongst sub-topics. The
     * default partitioner is based on the hash of the key.
     */
    private String partitioner = "org.apache.kafka.clients.producer.internals.DefaultPartitioner";
    /**
     * The partition to which the record will be sent (or null if no partition
     * was specified). If this option has been configured then it takes
     * precedence over the header KafkaConstants#PARTITION_KEY.
     */
    private Integer partitionKey;
    /**
     * The producer will attempt to batch records together into fewer requests
     * whenever multiple records are being sent to the same partition. This
     * helps performance on both the client and the server. This configuration
     * controls the default batch size in bytes. No attempt will be made to
     * batch records larger than this size. Requests sent to brokers will
     * contain multiple batches, one for each partition with data available to
     * be sent. A small batch size will make batching less common and may
     * reduce throughput (a batch size of zero will disable batching entirely).
     * A very large batch size may use memory a bit more wastefully as we will
     * always allocate a buffer of the specified batch size in anticipation of
     * additional records.
     */
    private Integer producerBatchSize = 16384;
    /**
     * The maximum number of unsent messages that can be queued up by the
     * producer when using async mode before either the producer must be
     * blocked or data must be dropped.
     */
    private Integer queueBufferingMaxMessages = 10000;
    /**
     * The size of the TCP receive buffer (SO_RCVBUF) to use when reading data.
     */
    private Integer receiveBufferBytes = 65536;
    /**
     * The amount of time to wait before attempting to reconnect to a given
     * host. This avoids repeatedly connecting to a host in a tight loop. This
     * backoff applies to all requests sent by the consumer to the broker.
     */
    private Integer reconnectBackoffMs = 50;
    /**
     * Whether the producer should store the RecordMetadata results from sending
     * to Kafka. The results are stored in a List containing the RecordMetadata
     * metadata. The list is stored on a header with the key
     * KafkaConstants#KAFKA_RECORDMETA.
     */
    private Boolean recordMetadata = true;
    /**
     * The number of acknowledgments the producer requires the leader to have
     * received before considering a request complete. This controls the
     * durability of records that are sent. The following settings are allowed:
     * acks=0 If set to zero then the producer will not wait for any
     * acknowledgment from the server at all. The record will be immediately
     * added to the socket buffer and considered sent. No guarantee can be made
     * that the server has received the record in this case, and the retries
     * configuration will not take effect (as the client won't generally know of
     * any failures). The offset given back for each record will always be set
     * to -1. acks=1 This will mean the leader will write the record to its
     * local log but will respond without awaiting full acknowledgement from all
     * followers. In this case should the leader fail immediately after
     * acknowledging the record but before the followers have replicated it then
     * the record will be lost. acks=all This means the leader will wait for the
     * full set of in-sync replicas to acknowledge the record. This guarantees
     * that the record will not be lost as long as at least one in-sync replica
     * remains alive. This is the strongest available guarantee. This is
     * equivalent to the acks=-1 setting. Note that enabling idempotence
     * requires this config value to be 'all'. If conflicting configurations are
     * set and idempotence is not explicitly enabled, idempotence is disabled.
     */
    private String requestRequiredAcks = "all";
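    // Illustrative only: the defaults here (acks=all plus idempotence) already
    // favour durability; a throughput-oriented, lossy alternative would be:
    //   camel.component.kafka.request-required-acks=1
    //   camel.component.kafka.enable-idempotence=false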
    /**
     * The amount of time the broker will wait trying to meet the
     * request.required.acks requirement before sending back an error to the
     * client.
     */
    private Integer requestTimeoutMs = 30000;
    /**
     * Setting a value greater than zero will cause the client to resend any
     * record whose send fails with a potentially transient error. Note that
     * this retry is no different than if the client resent the record upon
     * receiving the error. Produce requests will be failed before the number of
     * retries has been exhausted if the timeout configured by
     * delivery.timeout.ms expires first before successful acknowledgement.
     * Users should generally prefer to leave this config unset and instead use
     * delivery.timeout.ms to control retry behavior. Enabling idempotence
     * requires this config value to be greater than 0. If conflicting
     * configurations are set and idempotence is not explicitly enabled,
     * idempotence is disabled. Allowing retries while setting
     * enable.idempotence to false and max.in.flight.requests.per.connection to
     * 1 will potentially change the ordering of records because if two batches
     * are sent to a single partition, and the first fails and is retried but
     * the second succeeds, then the records in the second batch may appear
     * first.
     */
    private Integer retries;
    /**
     * Before each retry, the producer refreshes the metadata of relevant topics
     * to see if a new leader has been elected. Since leader election takes a
     * bit of time, this property specifies the amount of time that the producer
     * waits before refreshing the metadata.
     */
    private Integer retryBackoffMs = 100;
    /**
     * The size of the TCP send buffer (SO_SNDBUF) to use when sending data.
     */
    private Integer sendBufferBytes = 131072;
    /**
     * The serializer class for messages.
     */
    private String valueSerializer = "org.apache.kafka.common.serialization.StringSerializer";
    /**
     * To use a custom worker pool for continuing to route the Exchange after
     * the kafka server has acknowledged the message that was sent to it from
     * the KafkaProducer using asynchronous non-blocking processing. If using
     * this option then you must handle the lifecycle of the thread pool to
     * shut the pool down when no longer needed. The option is a
     * java.util.concurrent.ExecutorService type.
     */
    private ExecutorService workerPool;
    /**
     * Number of core threads for the worker pool for continuing to route the
     * Exchange after the kafka server has acknowledged the message that was
     * sent to it from the KafkaProducer using asynchronous non-blocking
     * processing.
     */
    private Integer workerPoolCoreSize = 10;
    /**
     * Maximum number of threads for the worker pool for continuing to route
     * the Exchange after the kafka server has acknowledged the message that
     * was sent to it from the KafkaProducer using asynchronous non-blocking
     * processing.
     */
    private Integer workerPoolMaxSize = 20;
    /**
     * Whether autowiring is enabled. This is used for automatic autowiring of
     * options (the option must be marked as autowired) by looking up in the
     * registry to find if there is a single instance of matching type, which
     * then gets configured on the component. This can be used for
     * automatically configuring JDBC data sources, JMS connection factories,
     * AWS Clients, etc.
     */
    private Boolean autowiredEnabled = true;
    /**
     * Factory to use for creating
     * org.apache.kafka.clients.consumer.KafkaConsumer and
     * org.apache.kafka.clients.producer.KafkaProducer instances. This allows
     * configuring a custom factory to create instances with logic that extends
     * the vanilla Kafka clients. The option is a
     * org.apache.camel.component.kafka.KafkaClientFactory type.
     */
    private KafkaClientFactory kafkaClientFactory;
    /**
     * Sets whether synchronous processing should be strictly used.
     */
    private Boolean synchronous = false;
    /**
     * URL of the Confluent Platform schema registry servers to use. The format
     * is host1:port1,host2:port2. This is known as schema.registry.url in the
     * Confluent Platform documentation. This option is only available in the
     * Confluent Platform (not standard Apache Kafka)
     */
    private String schemaRegistryURL;
    /**
     * Sets interceptors for producers or consumers. Producer interceptors have
     * to be classes implementing
     * org.apache.kafka.clients.producer.ProducerInterceptor. Consumer
     * interceptors have to be classes implementing
     * org.apache.kafka.clients.consumer.ConsumerInterceptor. Note that if you
     * use a producer interceptor on a consumer it will throw a
     * ClassCastException at runtime.
     */
    private String interceptorClasses;
    /**
     * Login thread sleep time between refresh attempts.
     */
    private Integer kerberosBeforeReloginMinTime = 60000;
    /**
     * Kerberos kinit command path. Default is /usr/bin/kinit.
     */
    private String kerberosInitCmd = "/usr/bin/kinit";
    /**
     * A list of rules for mapping from principal names to short names
     * (typically operating system usernames). The rules are evaluated in order
     * and the first rule that matches a principal name is used to map it to a
     * short name. Any later rules in the list are ignored. By default,
     * principal names of the form {username}/{hostname}@{REALM} are mapped to
     * {username}. For more details on the format please see the security
     * authorization and acls documentation (at the Apache Kafka project).
     * Multiple values can be separated by commas.
     */
    private String kerberosPrincipalToLocalRules = "DEFAULT";
    /**
     * Percentage of random jitter added to the renewal time.
     */
    private Double kerberosRenewJitter;
    /**
     * Login thread will sleep until the specified window factor of time from
     * last refresh to ticket's expiry has been reached, at which time it will
     * try to renew the ticket.
     */
    private Double kerberosRenewWindowFactor;
    /**
     * Expose the kafka sasl.jaas.config parameter. Example:
     * org.apache.kafka.common.security.plain.PlainLoginModule required
     * username="USERNAME" password="PASSWORD";
     */
    private String saslJaasConfig;
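    // Illustrative sketch only (placeholder credentials): a typical SASL/PLAIN
    // setup combines this option with securityProtocol and saslMechanism:
    //   camel.component.kafka.security-protocol=SASL_SSL
    //   camel.component.kafka.sasl-mechanism=PLAIN
    //   camel.component.kafka.sasl-jaas-config=org.apache.kafka.common.security.plain.PlainLoginModule required username="alice" password="alice-secret";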
    /**
     * The Kerberos principal name that Kafka runs as. This can be defined
     * either in Kafka's JAAS config or in Kafka's config.
     */
    private String saslKerberosServiceName;
    /**
     * The Simple Authentication and Security Layer (SASL) Mechanism used. For
     * the valid values see
     * http://www.iana.org/assignments/sasl-mechanisms/sasl-mechanisms.xhtml
     */
    private String saslMechanism = "GSSAPI";
    /**
     * Protocol used to communicate with brokers. SASL_PLAINTEXT, PLAINTEXT,
     * SASL_SSL and SSL are supported.
     */
    private String securityProtocol = "PLAINTEXT";
    /**
     * A list of cipher suites. This is a named combination of authentication,
     * encryption, MAC and key exchange algorithm used to negotiate the security
     * settings for a network connection using TLS or SSL network protocol. By
     * default all the available cipher suites are supported.
     */
    private String sslCipherSuites;
    /**
     * SSL configuration using a Camel SSLContextParameters object. If
     * configured it's applied before the other SSL endpoint parameters. NOTE:
     * Kafka only supports loading keystore from file locations, so prefix the
     * location with file: in the KeyStoreParameters.resource option. The option
     * is a org.apache.camel.support.jsse.SSLContextParameters type.
     */
    private SSLContextParameters sslContextParameters;
    /**
     * The list of protocols enabled for SSL connections. The default is
     * TLSv1.2,TLSv1.3 when running with Java 11 or newer, TLSv1.2 otherwise.
     * With the default value for Java 11, clients and servers will prefer
     * TLSv1.3 if both support it and fall back to TLSv1.2 otherwise (assuming
     * both support at least TLSv1.2). This default should be fine for most
     * cases. Also see the config documentation for SslProtocol.
     */
    private String sslEnabledProtocols;
    /**
     * The endpoint identification algorithm to validate server hostname using
     * server certificate. Use none or false to disable server hostname
     * verification.
     */
    private String sslEndpointAlgorithm = "https";
    /**
     * The algorithm used by key manager factory for SSL connections. Default
     * value is the key manager factory algorithm configured for the Java
     * Virtual Machine.
     */
    private String sslKeymanagerAlgorithm = "SunX509";
    /**
     * The password of the private key in the key store file or the PEM key
     * specified in sslKeystoreKey. This is required for clients only if two-way
     * authentication is configured.
     */
    private String sslKeyPassword;
    /**
     * The location of the key store file. This is optional for the client and
     * can be used for two-way authentication for the client.
     */
    private String sslKeystoreLocation;
    /**
     * The store password for the key store file. This is optional for the
     * client and only needed if sslKeystoreLocation is configured. Key store
     * password is not supported for PEM format.
     */
    private String sslKeystorePassword;
    /**
     * The file format of the key store file. This is optional for the client.
     * Default value is JKS.
     */
    private String sslKeystoreType = "JKS";
    /**
     * The SSL protocol used to generate the SSLContext. The default is TLSv1.3
     * when running with Java 11 or newer, TLSv1.2 otherwise. This value should
     * be fine for most use cases. Allowed values in recent JVMs are TLSv1.2 and
     * TLSv1.3. TLS, TLSv1.1, SSL, SSLv2 and SSLv3 may be supported in older
     * JVMs, but their usage is discouraged due to known security
     * vulnerabilities. With the default value for this config and
     * sslEnabledProtocols, clients will downgrade to TLSv1.2 if the server does
     * not support TLSv1.3. If this config is set to TLSv1.2, clients will not
     * use TLSv1.3 even if it is one of the values in sslEnabledProtocols and
     * the server only supports TLSv1.3.
     */
    private String sslProtocol;
    /**
     * The name of the security provider used for SSL connections. Default value
     * is the default security provider of the JVM.
     */
    private String sslProvider;
    /**
     * The algorithm used by trust manager factory for SSL connections. Default
     * value is the trust manager factory algorithm configured for the Java
     * Virtual Machine.
     */
    private String sslTrustmanagerAlgorithm = "PKIX";
    /**
     * The location of the trust store file.
     */
    private String sslTruststoreLocation;
    /**
     * The password for the trust store file. If a password is not set, the
     * configured trust store file will still be used, but integrity checking
     * is disabled. Trust store password is not supported for PEM format.
     */
    private String sslTruststorePassword;
    /**
     * The file format of the trust store file. Default value is JKS.
     */
    private String sslTruststoreType = "JKS";
    /**
     * Enable usage of global SSL context parameters.
     */
    private Boolean useGlobalSslContextParameters = false;
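    // Illustrative sketch only (paths and password are placeholders): a
    // minimal one-way TLS setup using the flat SSL options above:
    //   camel.component.kafka.security-protocol=SSL
    //   camel.component.kafka.ssl-truststore-location=/etc/kafka/client.truststore.jks
    //   camel.component.kafka.ssl-truststore-password=changeit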

    public Map<String, Object> getAdditionalProperties() {
        return additionalProperties;
    }

    public void setAdditionalProperties(Map<String, Object> additionalProperties) {
        this.additionalProperties = additionalProperties;
    }

    public String getBrokers() {
        return brokers;
    }

    public void setBrokers(String brokers) {
        this.brokers = brokers;
    }

    public String getClientId() {
        return clientId;
    }

    public void setClientId(String clientId) {
        this.clientId = clientId;
    }

    public KafkaConfiguration getConfiguration() {
        return configuration;
    }

    public void setConfiguration(KafkaConfiguration configuration) {
        this.configuration = configuration;
    }

    public HeaderFilterStrategy getHeaderFilterStrategy() {
        return headerFilterStrategy;
    }

    public void setHeaderFilterStrategy(
            HeaderFilterStrategy headerFilterStrategy) {
        this.headerFilterStrategy = headerFilterStrategy;
    }

    public Integer getReconnectBackoffMaxMs() {
        return reconnectBackoffMaxMs;
    }

    public void setReconnectBackoffMaxMs(Integer reconnectBackoffMaxMs) {
        this.reconnectBackoffMaxMs = reconnectBackoffMaxMs;
    }

    public Integer getShutdownTimeout() {
        return shutdownTimeout;
    }

    public void setShutdownTimeout(Integer shutdownTimeout) {
        this.shutdownTimeout = shutdownTimeout;
    }

    public Boolean getAllowManualCommit() {
        return allowManualCommit;
    }

    public void setAllowManualCommit(Boolean allowManualCommit) {
        this.allowManualCommit = allowManualCommit;
    }

    public Boolean getAutoCommitEnable() {
        return autoCommitEnable;
    }

    public void setAutoCommitEnable(Boolean autoCommitEnable) {
        this.autoCommitEnable = autoCommitEnable;
    }

    public Integer getAutoCommitIntervalMs() {
        return autoCommitIntervalMs;
    }

    public void setAutoCommitIntervalMs(Integer autoCommitIntervalMs) {
        this.autoCommitIntervalMs = autoCommitIntervalMs;
    }

    public String getAutoOffsetReset() {
        return autoOffsetReset;
    }

    public void setAutoOffsetReset(String autoOffsetReset) {
        this.autoOffsetReset = autoOffsetReset;
    }

    public Boolean getBreakOnFirstError() {
        return breakOnFirstError;
    }

    public void setBreakOnFirstError(Boolean breakOnFirstError) {
        this.breakOnFirstError = breakOnFirstError;
    }

    public Boolean getBridgeErrorHandler() {
        return bridgeErrorHandler;
    }

    public void setBridgeErrorHandler(Boolean bridgeErrorHandler) {
        this.bridgeErrorHandler = bridgeErrorHandler;
    }

    public Boolean getCheckCrcs() {
        return checkCrcs;
    }

    public void setCheckCrcs(Boolean checkCrcs) {
        this.checkCrcs = checkCrcs;
    }

    public Long getCommitTimeoutMs() {
        return commitTimeoutMs;
    }

    public void setCommitTimeoutMs(Long commitTimeoutMs) {
        this.commitTimeoutMs = commitTimeoutMs;
    }

    public Integer getConsumerRequestTimeoutMs() {
        return consumerRequestTimeoutMs;
    }

    public void setConsumerRequestTimeoutMs(Integer consumerRequestTimeoutMs) {
        this.consumerRequestTimeoutMs = consumerRequestTimeoutMs;
    }

    public Integer getConsumersCount() {
        return consumersCount;
    }

    public void setConsumersCount(Integer consumersCount) {
        this.consumersCount = consumersCount;
    }

    public Integer getFetchMaxBytes() {
        return fetchMaxBytes;
    }

    public void setFetchMaxBytes(Integer fetchMaxBytes) {
        this.fetchMaxBytes = fetchMaxBytes;
    }

    public Integer getFetchMinBytes() {
        return fetchMinBytes;
    }

    public void setFetchMinBytes(Integer fetchMinBytes) {
        this.fetchMinBytes = fetchMinBytes;
    }

    public Integer getFetchWaitMaxMs() {
        return fetchWaitMaxMs;
    }

    public void setFetchWaitMaxMs(Integer fetchWaitMaxMs) {
        this.fetchWaitMaxMs = fetchWaitMaxMs;
    }

    public String getGroupId() {
        return groupId;
    }

    public void setGroupId(String groupId) {
        this.groupId = groupId;
    }

    public String getGroupInstanceId() {
        return groupInstanceId;
    }

    public void setGroupInstanceId(String groupInstanceId) {
        this.groupInstanceId = groupInstanceId;
    }

    public KafkaHeaderDeserializer getHeaderDeserializer() {
        return headerDeserializer;
    }

    public void setHeaderDeserializer(KafkaHeaderDeserializer headerDeserializer) {
        this.headerDeserializer = headerDeserializer;
    }

    public Integer getHeartbeatIntervalMs() {
        return heartbeatIntervalMs;
    }

    public void setHeartbeatIntervalMs(Integer heartbeatIntervalMs) {
        this.heartbeatIntervalMs = heartbeatIntervalMs;
    }

    public String getKeyDeserializer() {
        return keyDeserializer;
    }

    public void setKeyDeserializer(String keyDeserializer) {
        this.keyDeserializer = keyDeserializer;
    }

    public Integer getMaxPartitionFetchBytes() {
        return maxPartitionFetchBytes;
    }

    public void setMaxPartitionFetchBytes(Integer maxPartitionFetchBytes) {
        this.maxPartitionFetchBytes = maxPartitionFetchBytes;
    }

    public Long getMaxPollIntervalMs() {
        return maxPollIntervalMs;
    }

    public void setMaxPollIntervalMs(Long maxPollIntervalMs) {
        this.maxPollIntervalMs = maxPollIntervalMs;
    }

    public Integer getMaxPollRecords() {
        return maxPollRecords;
    }

    public void setMaxPollRecords(Integer maxPollRecords) {
        this.maxPollRecords = maxPollRecords;
    }

    public StateRepository getOffsetRepository() {
        return offsetRepository;
    }

    public void setOffsetRepository(
            StateRepository offsetRepository) {
        this.offsetRepository = offsetRepository;
    }

    public String getPartitionAssignor() {
        return partitionAssignor;
    }

    public void setPartitionAssignor(String partitionAssignor) {
        this.partitionAssignor = partitionAssignor;
    }

    public PollOnError getPollOnError() {
        return pollOnError;
    }

    public void setPollOnError(PollOnError pollOnError) {
        this.pollOnError = pollOnError;
    }

    public Long getPollTimeoutMs() {
        return pollTimeoutMs;
    }

    public void setPollTimeoutMs(Long pollTimeoutMs) {
        this.pollTimeoutMs = pollTimeoutMs;
    }

    public SeekPolicy getSeekTo() {
        return seekTo;
    }

    public void setSeekTo(SeekPolicy seekTo) {
        this.seekTo = seekTo;
    }

    public Integer getSessionTimeoutMs() {
        return sessionTimeoutMs;
    }

    public void setSessionTimeoutMs(Integer sessionTimeoutMs) {
        this.sessionTimeoutMs = sessionTimeoutMs;
    }

    public Boolean getSpecificAvroReader() {
        return specificAvroReader;
    }

    public void setSpecificAvroReader(Boolean specificAvroReader) {
        this.specificAvroReader = specificAvroReader;
    }

    public Boolean getTopicIsPattern() {
        return topicIsPattern;
    }

    public void setTopicIsPattern(Boolean topicIsPattern) {
        this.topicIsPattern = topicIsPattern;
    }

    public String getValueDeserializer() {
        return valueDeserializer;
    }

    public void setValueDeserializer(String valueDeserializer) {
        this.valueDeserializer = valueDeserializer;
    }

    public Long getCreateConsumerBackoffInterval() {
        return createConsumerBackoffInterval;
    }

    public void setCreateConsumerBackoffInterval(
            Long createConsumerBackoffInterval) {
        this.createConsumerBackoffInterval = createConsumerBackoffInterval;
    }

    public Integer getCreateConsumerBackoffMaxAttempts() {
        return createConsumerBackoffMaxAttempts;
    }

    public void setCreateConsumerBackoffMaxAttempts(
            Integer createConsumerBackoffMaxAttempts) {
        this.createConsumerBackoffMaxAttempts = createConsumerBackoffMaxAttempts;
    }

    public String getIsolationLevel() {
        return isolationLevel;
    }

    public void setIsolationLevel(String isolationLevel) {
        this.isolationLevel = isolationLevel;
    }

    public KafkaManualCommitFactory getKafkaManualCommitFactory() {
        return kafkaManualCommitFactory;
    }

    public void setKafkaManualCommitFactory(
            KafkaManualCommitFactory kafkaManualCommitFactory) {
        this.kafkaManualCommitFactory = kafkaManualCommitFactory;
    }

    public PollExceptionStrategy getPollExceptionStrategy() {
        return pollExceptionStrategy;
    }

    public void setPollExceptionStrategy(
            PollExceptionStrategy pollExceptionStrategy) {
        this.pollExceptionStrategy = pollExceptionStrategy;
    }

    public Long getSubscribeConsumerBackoffInterval() {
        return subscribeConsumerBackoffInterval;
    }

    public void setSubscribeConsumerBackoffInterval(
            Long subscribeConsumerBackoffInterval) {
        this.subscribeConsumerBackoffInterval = subscribeConsumerBackoffInterval;
    }

    public Integer getSubscribeConsumerBackoffMaxAttempts() {
        return subscribeConsumerBackoffMaxAttempts;
    }

    public void setSubscribeConsumerBackoffMaxAttempts(
            Integer subscribeConsumerBackoffMaxAttempts) {
        this.subscribeConsumerBackoffMaxAttempts = subscribeConsumerBackoffMaxAttempts;
    }

    public Boolean getBatchWithIndividualHeaders() {
        return batchWithIndividualHeaders;
    }

    public void setBatchWithIndividualHeaders(Boolean batchWithIndividualHeaders) {
        this.batchWithIndividualHeaders = batchWithIndividualHeaders;
    }

    public Integer getBufferMemorySize() {
        return bufferMemorySize;
    }

    public void setBufferMemorySize(Integer bufferMemorySize) {
        this.bufferMemorySize = bufferMemorySize;
    }

    public String getCompressionCodec() {
        return compressionCodec;
    }

    public void setCompressionCodec(String compressionCodec) {
        this.compressionCodec = compressionCodec;
    }

    public Integer getConnectionMaxIdleMs() {
        return connectionMaxIdleMs;
    }

    public void setConnectionMaxIdleMs(Integer connectionMaxIdleMs) {
        this.connectionMaxIdleMs = connectionMaxIdleMs;
    }

    public Integer getDeliveryTimeoutMs() {
        return deliveryTimeoutMs;
    }

    public void setDeliveryTimeoutMs(Integer deliveryTimeoutMs) {
        this.deliveryTimeoutMs = deliveryTimeoutMs;
    }

    public Boolean getEnableIdempotence() {
        return enableIdempotence;
    }

    public void setEnableIdempotence(Boolean enableIdempotence) {
        this.enableIdempotence = enableIdempotence;
    }

    public KafkaHeaderSerializer getHeaderSerializer() {
        return headerSerializer;
    }

    public void setHeaderSerializer(KafkaHeaderSerializer headerSerializer) {
        this.headerSerializer = headerSerializer;
    }

    public String getKey() {
        return key;
    }

    public void setKey(String key) {
        this.key = key;
    }

    public String getKeySerializer() {
        return keySerializer;
    }

    public void setKeySerializer(String keySerializer) {
        this.keySerializer = keySerializer;
    }

    public Boolean getLazyStartProducer() {
        return lazyStartProducer;
    }

    public void setLazyStartProducer(Boolean lazyStartProducer) {
        this.lazyStartProducer = lazyStartProducer;
    }

    public Integer getLingerMs() {
        return lingerMs;
    }

    public void setLingerMs(Integer lingerMs) {
        this.lingerMs = lingerMs;
    }

    public Integer getMaxBlockMs() {
        return maxBlockMs;
    }

    public void setMaxBlockMs(Integer maxBlockMs) {
        this.maxBlockMs = maxBlockMs;
    }

    public Integer getMaxInFlightRequest() {
        return maxInFlightRequest;
    }

    public void setMaxInFlightRequest(Integer maxInFlightRequest) {
        this.maxInFlightRequest = maxInFlightRequest;
    }

    public Integer getMaxRequestSize() {
        return maxRequestSize;
    }

    public void setMaxRequestSize(Integer maxRequestSize) {
        this.maxRequestSize = maxRequestSize;
    }

    public Integer getMetadataMaxAgeMs() {
        return metadataMaxAgeMs;
    }

    public void setMetadataMaxAgeMs(Integer metadataMaxAgeMs) {
        this.metadataMaxAgeMs = metadataMaxAgeMs;
    }

    public String getMetricReporters() {
        return metricReporters;
    }

    public void setMetricReporters(String metricReporters) {
        this.metricReporters = metricReporters;
    }

    public Integer getMetricsSampleWindowMs() {
        return metricsSampleWindowMs;
    }

    public void setMetricsSampleWindowMs(Integer metricsSampleWindowMs) {
        this.metricsSampleWindowMs = metricsSampleWindowMs;
    }

    public Integer getNoOfMetricsSample() {
        return noOfMetricsSample;
    }

    public void setNoOfMetricsSample(Integer noOfMetricsSample) {
        this.noOfMetricsSample = noOfMetricsSample;
    }

    public String getPartitioner() {
        return partitioner;
    }

    public void setPartitioner(String partitioner) {
        this.partitioner = partitioner;
    }

    public Integer getPartitionKey() {
        return partitionKey;
    }

    public void setPartitionKey(Integer partitionKey) {
        this.partitionKey = partitionKey;
    }

    public Integer getProducerBatchSize() {
        return producerBatchSize;
    }

    public void setProducerBatchSize(Integer producerBatchSize) {
        this.producerBatchSize = producerBatchSize;
    }

    public Integer getQueueBufferingMaxMessages() {
        return queueBufferingMaxMessages;
    }

    public void setQueueBufferingMaxMessages(Integer queueBufferingMaxMessages) {
        this.queueBufferingMaxMessages = queueBufferingMaxMessages;
    }

    public Integer getReceiveBufferBytes() {
        return receiveBufferBytes;
    }

    public void setReceiveBufferBytes(Integer receiveBufferBytes) {
        this.receiveBufferBytes = receiveBufferBytes;
    }

    public Integer getReconnectBackoffMs() {
        return reconnectBackoffMs;
    }

    public void setReconnectBackoffMs(Integer reconnectBackoffMs) {
        this.reconnectBackoffMs = reconnectBackoffMs;
    }

    public Boolean getRecordMetadata() {
        return recordMetadata;
    }

    public void setRecordMetadata(Boolean recordMetadata) {
        this.recordMetadata = recordMetadata;
    }

    public String getRequestRequiredAcks() {
        return requestRequiredAcks;
    }

    public void setRequestRequiredAcks(String requestRequiredAcks) {
        this.requestRequiredAcks = requestRequiredAcks;
    }

    public Integer getRequestTimeoutMs() {
        return requestTimeoutMs;
    }

    public void setRequestTimeoutMs(Integer requestTimeoutMs) {
        this.requestTimeoutMs = requestTimeoutMs;
    }

    public Integer getRetries() {
        return retries;
    }

    public void setRetries(Integer retries) {
        this.retries = retries;
    }

    public Integer getRetryBackoffMs() {
        return retryBackoffMs;
    }

    public void setRetryBackoffMs(Integer retryBackoffMs) {
        this.retryBackoffMs = retryBackoffMs;
    }

    public Integer getSendBufferBytes() {
        return sendBufferBytes;
    }

    public void setSendBufferBytes(Integer sendBufferBytes) {
        this.sendBufferBytes = sendBufferBytes;
    }

    public String getValueSerializer() {
        return valueSerializer;
    }

    public void setValueSerializer(String valueSerializer) {
        this.valueSerializer = valueSerializer;
    }

    public ExecutorService getWorkerPool() {
        return workerPool;
    }

    public void setWorkerPool(ExecutorService workerPool) {
        this.workerPool = workerPool;
    }

    public Integer getWorkerPoolCoreSize() {
        return workerPoolCoreSize;
    }

    public void setWorkerPoolCoreSize(Integer workerPoolCoreSize) {
        this.workerPoolCoreSize = workerPoolCoreSize;
    }

    public Integer getWorkerPoolMaxSize() {
        return workerPoolMaxSize;
    }

    public void setWorkerPoolMaxSize(Integer workerPoolMaxSize) {
        this.workerPoolMaxSize = workerPoolMaxSize;
    }

    public Boolean getAutowiredEnabled() {
        return autowiredEnabled;
    }

    public void setAutowiredEnabled(Boolean autowiredEnabled) {
        this.autowiredEnabled = autowiredEnabled;
    }

    public KafkaClientFactory getKafkaClientFactory() {
        return kafkaClientFactory;
    }

    public void setKafkaClientFactory(KafkaClientFactory kafkaClientFactory) {
        this.kafkaClientFactory = kafkaClientFactory;
    }

    public Boolean getSynchronous() {
        return synchronous;
    }

    public void setSynchronous(Boolean synchronous) {
        this.synchronous = synchronous;
    }

    public String getSchemaRegistryURL() {
        return schemaRegistryURL;
    }

    public void setSchemaRegistryURL(String schemaRegistryURL) {
        this.schemaRegistryURL = schemaRegistryURL;
    }

    public String getInterceptorClasses() {
        return interceptorClasses;
    }

    public void setInterceptorClasses(String interceptorClasses) {
        this.interceptorClasses = interceptorClasses;
    }

    public Integer getKerberosBeforeReloginMinTime() {
        return kerberosBeforeReloginMinTime;
    }

    public void setKerberosBeforeReloginMinTime(
            Integer kerberosBeforeReloginMinTime) {
        this.kerberosBeforeReloginMinTime = kerberosBeforeReloginMinTime;
    }

    public String getKerberosInitCmd() {
        return kerberosInitCmd;
    }

    public void setKerberosInitCmd(String kerberosInitCmd) {
        this.kerberosInitCmd = kerberosInitCmd;
    }

    public String getKerberosPrincipalToLocalRules() {
        return kerberosPrincipalToLocalRules;
    }

    public void setKerberosPrincipalToLocalRules(
            String kerberosPrincipalToLocalRules) {
        this.kerberosPrincipalToLocalRules = kerberosPrincipalToLocalRules;
    }

    public Double getKerberosRenewJitter() {
        return kerberosRenewJitter;
    }

    public void setKerberosRenewJitter(Double kerberosRenewJitter) {
        this.kerberosRenewJitter = kerberosRenewJitter;
    }

    public Double getKerberosRenewWindowFactor() {
        return kerberosRenewWindowFactor;
    }

    public void setKerberosRenewWindowFactor(Double kerberosRenewWindowFactor) {
        this.kerberosRenewWindowFactor = kerberosRenewWindowFactor;
    }

    public String getSaslJaasConfig() {
        return saslJaasConfig;
    }

    public void setSaslJaasConfig(String saslJaasConfig) {
        this.saslJaasConfig = saslJaasConfig;
    }

    public String getSaslKerberosServiceName() {
        return saslKerberosServiceName;
    }

    public void setSaslKerberosServiceName(String saslKerberosServiceName) {
        this.saslKerberosServiceName = saslKerberosServiceName;
    }

    public String getSaslMechanism() {
        return saslMechanism;
    }

    public void setSaslMechanism(String saslMechanism) {
        this.saslMechanism = saslMechanism;
    }

    public String getSecurityProtocol() {
        return securityProtocol;
    }

    public void setSecurityProtocol(String securityProtocol) {
        this.securityProtocol = securityProtocol;
    }

    public String getSslCipherSuites() {
        return sslCipherSuites;
    }

    public void setSslCipherSuites(String sslCipherSuites) {
        this.sslCipherSuites = sslCipherSuites;
    }

    public SSLContextParameters getSslContextParameters() {
        return sslContextParameters;
    }

    public void setSslContextParameters(
            SSLContextParameters sslContextParameters) {
        this.sslContextParameters = sslContextParameters;
    }

    public String getSslEnabledProtocols() {
        return sslEnabledProtocols;
    }

    public void setSslEnabledProtocols(String sslEnabledProtocols) {
        this.sslEnabledProtocols = sslEnabledProtocols;
    }

    public String getSslEndpointAlgorithm() {
        return sslEndpointAlgorithm;
    }

    public void setSslEndpointAlgorithm(String sslEndpointAlgorithm) {
        this.sslEndpointAlgorithm = sslEndpointAlgorithm;
    }

    public String getSslKeymanagerAlgorithm() {
        return sslKeymanagerAlgorithm;
    }

    public void setSslKeymanagerAlgorithm(String sslKeymanagerAlgorithm) {
        this.sslKeymanagerAlgorithm = sslKeymanagerAlgorithm;
    }

    public String getSslKeyPassword() {
        return sslKeyPassword;
    }

    public void setSslKeyPassword(String sslKeyPassword) {
        this.sslKeyPassword = sslKeyPassword;
    }

    public String getSslKeystoreLocation() {
        return sslKeystoreLocation;
    }

    public void setSslKeystoreLocation(String sslKeystoreLocation) {
        this.sslKeystoreLocation = sslKeystoreLocation;
    }

    public String getSslKeystorePassword() {
        return sslKeystorePassword;
    }

    public void setSslKeystorePassword(String sslKeystorePassword) {
        this.sslKeystorePassword = sslKeystorePassword;
    }

    public String getSslKeystoreType() {
        return sslKeystoreType;
    }

    public void setSslKeystoreType(String sslKeystoreType) {
        this.sslKeystoreType = sslKeystoreType;
    }

    public String getSslProtocol() {
        return sslProtocol;
    }

    public void setSslProtocol(String sslProtocol) {
        this.sslProtocol = sslProtocol;
    }

    public String getSslProvider() {
        return sslProvider;
    }

    public void setSslProvider(String sslProvider) {
        this.sslProvider = sslProvider;
    }

    public String getSslTrustmanagerAlgorithm() {
        return sslTrustmanagerAlgorithm;
    }

    public void setSslTrustmanagerAlgorithm(String sslTrustmanagerAlgorithm) {
        this.sslTrustmanagerAlgorithm = sslTrustmanagerAlgorithm;
    }

    public String getSslTruststoreLocation() {
        return sslTruststoreLocation;
    }

    public void setSslTruststoreLocation(String sslTruststoreLocation) {
        this.sslTruststoreLocation = sslTruststoreLocation;
    }

    public String getSslTruststorePassword() {
        return sslTruststorePassword;
    }

    public void setSslTruststorePassword(String sslTruststorePassword) {
        this.sslTruststorePassword = sslTruststorePassword;
    }

    public String getSslTruststoreType() {
        return sslTruststoreType;
    }

    public void setSslTruststoreType(String sslTruststoreType) {
        this.sslTruststoreType = sslTruststoreType;
    }

    public Boolean getUseGlobalSslContextParameters() {
        return useGlobalSslContextParameters;
    }

    public void setUseGlobalSslContextParameters(
            Boolean useGlobalSslContextParameters) {
        this.useGlobalSslContextParameters = useGlobalSslContextParameters;
    }
}
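
Because the class exposes its options as ordinary bean properties, it can be populated directly in code as well as through the usual camel.component.kafka.* entries that Spring Boot binds for you. Below is a minimal sketch using only setters defined in this class; the class name KafkaConfigSketch, the method name producerTuning, and all of the values (acks, retries, linger, security settings, truststore path) are illustrative assumptions, not Camel defaults.

import org.apache.camel.component.kafka.springboot.KafkaComponentConfiguration;

// Hypothetical helper, not part of Camel: builds a configuration bean by hand
// instead of relying on Spring Boot's property binding.
public class KafkaConfigSketch {

    public static KafkaComponentConfiguration producerTuning() {
        KafkaComponentConfiguration conf = new KafkaComponentConfiguration();
        conf.setLingerMs(5);                  // delay sends briefly to improve batching (illustrative)
        conf.setRequestRequiredAcks("all");   // wait for the full in-sync replica set to acknowledge
        conf.setRetries(3);                   // retry transient send failures (illustrative)
        conf.setSecurityProtocol("SASL_SSL"); // assumes a secured cluster
        conf.setSaslMechanism("PLAIN");
        conf.setSslTruststoreLocation("/etc/kafka/truststore.jks"); // hypothetical path
        return conf;
    }
}

In a Spring Boot application this binding normally happens automatically via the camel.component.kafka prefix; setting the properties directly, as sketched here, is mainly useful in tests or when constructing the configuration outside of property binding.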