# producer-defaults.properties
###
### Producer Basics
###
# The client id is a user-specified string sent in each request to help trace calls. It should logically identify the
# application making the request.
#
client.id=test-producer
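#
# Example (the application name below is hypothetical, shown for illustration only): a client id that identifies
# the producing application rather than a generic test value.
#client.id=orders-service-producer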
# This is for bootstrapping knowledge about the rest of the cluster, and the producer will only use it for getting
# metadata (topics, partitions and replicas). The socket connections for sending the actual data will be established
# based on the broker information returned in the metadata.
#
# The format is:
# host1:port1,host2:port2
#
# The list can be a subset of brokers or a VIP pointing to a subset of brokers.
#
metadata.broker.list=localhost:9092
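#
# Example with two seed brokers, following the host1:port1,host2:port2 format above (the hostnames are
# hypothetical, for illustration only):
#metadata.broker.list=broker1.example.com:9092,broker2.example.com:9092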
# The partitioner class for partitioning messages amongst the partitions of a topic. The default partitioner is based
# on the hash of the key.
#
partitioner.class=kafka.producer.DefaultPartitioner
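#
# Example (the class name is hypothetical, for illustration only): plugging in a custom partitioner implementation
# that is available on the producer's classpath.
#partitioner.class=com.example.kafka.MyCustomPartitioner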
# Specifies whether messages are sent asynchronously in a background thread. Valid values are (1) "async" for
# asynchronous send and (2) "sync" for synchronous send. Setting the producer to async allows batching together of
# requests (which is great for throughput), but opens the possibility that a failure of the client machine will drop
# unsent data.
#
producer.type=sync
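#
# Example, based on the values documented above: switch to asynchronous sends to enable batching, at the risk of
# losing buffered messages if the client machine fails.
#producer.type=async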
# This value controls when a produce request is considered completed. Specifically, how many other brokers must have
# committed the data to their log and acknowledged this to the leader? Typical values are:
#
# 0) The producer never waits for an acknowledgement from the broker, i.e. fire-and-forget (the same behavior as 0.7).
# This option provides the lowest latency but the weakest durability guarantees (some data will be lost when a
# server fails).
# 1) The producer gets an acknowledgement after the leader replica has received the data.
# This option provides better durability as the client waits until the server acknowledges the request as successful
# (only messages that were written to the now-dead leader but not yet replicated will be lost).
# -1) The producer gets an acknowledgement after all in-sync replicas have received the data.
# This option provides the best durability, we guarantee that no messages will be lost as long as at least one
# in-sync replica remains.
#
# In general, the valid range of this setting is [-1, <number of replicas of the topic>], since acknowledgements
# come from replicas.
#
request.required.acks=0
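#
# Examples of the other values documented above: acknowledge after the leader has the data, or only after all
# in-sync replicas have it.
#request.required.acks=1
#request.required.acks=-1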
# This property will cause the producer to automatically retry a failed send request. This property specifies the
# number of retries when such failures occur. Note that setting a non-zero value here can lead to duplicates in the case
# of network errors that cause a message to be sent but the acknowledgement to be lost.
message.send.max.retries=3
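#
# Example: disabling retries entirely avoids the duplicates described above, at the cost of not retrying sends that
# fail transiently.
#message.send.max.retries=0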
# The producer generally refreshes the topic metadata from brokers when there is a failure (key missing, leader
# not available...). It will also poll regularly (default: every 10min = 600000ms).
#
# If you set this to a negative value, metadata will only get refreshed on failure.
#
# If you set this to zero, the metadata will get refreshed after each message sent (not recommended).
# Important note: The refresh happens only AFTER the message is sent, so if the producer never sends a message the
# metadata is never refreshed!
#
topic.metadata.refresh.interval.ms=600000
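#
# Example, based on the behavior documented above: refresh metadata only when a failure occurs.
#topic.metadata.refresh.interval.ms=-1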
# Specify the compression codec for all data generated by this producer. Valid values are "none", "gzip" and "snappy".
# The old config values work as well: 0 (none), 1 (gzip), 2 (snappy).
#
compression.codec=snappy
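#
# Examples of the other documented values:
#compression.codec=none
#compression.codec=gzip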
# The serializer class for messages. The default encoder takes a byte[] and returns the same byte[].
#
serializer.class=kafka.serializer.DefaultEncoder
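#
# Example (illustration only; keep the default if the application sends byte arrays): Kafka also ships a string
# encoder for producers that send plain strings.
#serializer.class=kafka.serializer.StringEncoder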
# Set whether compression should be turned on for particular topics. If the compression codec is anything other than
# `NoCompressionCodec`, enable compression only for specified topics if any. If the list of compressed topics is empty,
# then enable the specified compression codec for all topics. If the compression codec is `NoCompressionCodec`,
# compression is disabled for all topics.
#
#compressed.topics=
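#
# Example (the topic names are hypothetical, for illustration only): restrict the snappy codec configured above to
# two specific topics.
#compressed.topics=clickstream,page-views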
###
### Async Producer
###
# Maximum time to buffer data when using async mode. For example a setting of 100 will try to batch together 100ms of
# messages to send at once. This will improve throughput but adds message delivery latency due to the buffering.
#
queue.buffering.max.ms=5000
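#
# Example, taken from the description above: buffer at most 100ms of messages before sending a batch.
#queue.buffering.max.ms=100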
# The maximum number of unsent messages that can be queued up by the producer when using async mode before either the
# producer must be blocked or data must be dropped.
#
queue.buffering.max.messages=10000
# The amount of time to block before dropping messages when running in async mode and the buffer has reached
# `queue.buffering.max.messages`.
#
# 0: Events will be enqueued immediately or dropped if the queue is full (the producer send call will never
#    block).
# Negative: The producer will block indefinitely if the queue is full, and it will never willingly drop a send.
# Positive: The producer will block up to this many milliseconds if the queue is full.
#
queue.enqueue.timeout.ms=-1
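#
# Examples of the other documented modes: never block (drop when the queue is full), or block for at most 500ms
# (the 500ms figure is an arbitrary illustration).
#queue.enqueue.timeout.ms=0
#queue.enqueue.timeout.ms=500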
# The number of messages to send in one batch when using async mode. The producer will wait until either this number of
# messages are ready to send or queue.buffering.max.ms is reached.
#
# Important note: If compression is enabled, then the compressed batch of messages is treated as a single message,
# whose size must be smaller than `max.message.bytes`. If compression is disabled, only each individual (uncompressed)
# message must be smaller than `max.message.bytes`, i.e. in this case the batch size does not really matter w.r.t.
# `max.message.bytes`.
# See http://grokbase.com/t/kafka/users/139v9xqqj7/understanding-messagesizetoolarge-and-batches.
#
batch.num.messages=200
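#
# Example (the value is an arbitrary illustration): a smaller batch, e.g. to keep a compressed batch well below the
# broker's max.message.bytes limit when individual messages are large.
#batch.num.messages=50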