###
### Consumer Basics
###
# A string that uniquely identifies the group of consumer processes to which this consumer belongs. By setting the
# same group id, multiple processes indicate that they are all part of the same consumer group.
#
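# Example (hypothetical id): three processes all started with the setting below would divide the partitions of their
# subscribed topics among themselves.
#
# group.id=billing-service
#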
group.id=test-consumer
# Specifies the zookeeper connection string in the form `hostname:port`, where host and port are the host and port of a
# zookeeper server. To allow connecting through other zookeeper nodes when that zookeeper machine is down, you can also
# specify multiple hosts in the form `hostname1:port1,hostname2:port2,hostname3:port3`.
#
# The server may also have a zookeeper chroot path as part of its zookeeper connection string, which puts its data
# under some path in the global zookeeper namespace. If so, the consumer should use the same chroot path in its
# connection string. For example, to give a chroot path of /chroot/path you would give the connection string as
# `hostname1:port1,hostname2:port2,hostname3:port3/chroot/path`.
#
# Important: You must only add the custom chroot _to the end of the string_:
#
# # WRONG!
# zookeeper.connect=zkserver1:2181/my_kafka,zkserver:2181/my_kafka
#
# # CORRECT
# zookeeper.connect=zkserver1:2181,zkserver:2181/my_kafka
#
zookeeper.connect=localhost:2181
# If true, periodically commit to zookeeper the offset of messages already fetched by the consumer. When a process
# fails, this committed offset is the position from which its replacement will begin consuming.
#
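# The commit frequency is controlled by a companion setting (hedged example below; verify the default for your Kafka
# version before relying on it):
#
# # Commit consumed offsets to zookeeper once per minute:
# auto.commit.interval.ms=60000
#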
auto.commit.enable=true
# What to do when there is no initial offset in Zookeeper or if an offset is out of range:
# * "smallest": Automatically reset the offset to the smallest offset (= read from the very beginning of the stream)
# * "largest": Automatically reset the offset to the largest offset (= connect and wait for new data coming in)
# * anything else: Throw exception to the consumer.
#
# If this is set to largest, the consumer may lose some messages when the number of partitions -- for the topics it
# has subscribed to -- changes on the broker. To prevent data loss during partition addition, set auto.offset.reset to
# smallest.
#
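# For example, a brand-new consumer group that must process a topic's full retained history would start with:
#
# auto.offset.reset=smallest
#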
auto.offset.reset=largest
# Throw a timeout exception to the consumer if no message is available for consumption after the specified interval.
# A negative value means the consumer waits indefinitely and never throws a timeout exception.
#
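# Example (hypothetical value): make consumption give up after five seconds without a message; in the old high-level
# consumer this surfaces as a kafka.consumer.ConsumerTimeoutException from the stream's iterator.
#
# consumer.timeout.ms=5000
#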
consumer.timeout.ms=-1
# The socket receive buffer for network requests.
#
socket.receive.buffer.bytes=65536
# The socket timeout for network requests.
#
# See the entry "What is the relationship between fetch.wait.max.ms and socket.timeout.ms on the consumer?" in the
# FAQ.
#
socket.timeout.ms=30000
# The maximum amount of time the server will block before answering the fetch request if there isn't sufficient data to
# immediately satisfy `fetch.min.bytes`.
#
# See the entry "What is the relationship between fetch.wait.max.ms and socket.timeout.ms on the consumer?" in the
# FAQ.
#
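# The short answer from that FAQ entry: fetch.wait.max.ms must stay below socket.timeout.ms, or a long-polling fetch
# can outlive the socket timeout and fail spuriously. For example (hypothetical tuning to reduce request rate), both
# values could be raised together while preserving that invariant:
#
# fetch.wait.max.ms=1000
# socket.timeout.ms=45000
#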
fetch.wait.max.ms=100
# The minimum amount of data the server should return for a fetch request. If insufficient data is available, the
# server will wait for that much data to accumulate before answering the request.
#
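# Example (hypothetical value): trade a little latency for throughput by having the server hold each fetch until at
# least 64 KB has accumulated (or until fetch.wait.max.ms expires):
#
# fetch.min.bytes=65536
#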
fetch.min.bytes=1
# The number of bytes of messages to attempt to fetch for each topic-partition in each fetch request. These bytes will
# be read into memory for each partition, so this helps control the memory used by the consumer. The fetch request size
# must be at least as large as the maximum message size the server allows; otherwise the producer could send messages
# larger than the consumer can fetch.
#
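# Example (hypothetical sizing): if the brokers accept messages of up to 2 MB (message.max.bytes=2097152 on the
# broker side), the consumer must be able to fetch at least that much:
#
# fetch.message.max.bytes=2097152
#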
fetch.message.max.bytes=1048576
# Max number of message chunks buffered for consumption. Each chunk can be up to fetch.message.max.bytes in size.
#
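# Rough memory bound with the defaults in this file (an approximation; the exact accounting depends on the number of
# consumer streams): 10 chunks * 1048576 bytes per chunk = ~10 MB of buffered messages per stream.
#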
queued.max.message.chunks=10
# When a new consumer joins a consumer group, the set of consumers attempt to "rebalance" the load to assign partitions
# to each consumer. If the set of consumers changes while this assignment is taking place, the rebalance will fail and
# retry. This setting controls the maximum number of attempts before giving up.
#
rebalance.max.retries=4
# Backoff time between retries during rebalance.
#
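# A common sizing guideline: rebalance.max.retries * rebalance.backoff.ms should exceed zookeeper.session.timeout.ms
# (set further below), so a dead consumer's zookeeper session can expire and release its partition claims before the
# surviving consumers stop retrying. The defaults here satisfy it: 4 * 2000 ms = 8000 ms > 6000 ms.
#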
rebalance.backoff.ms=2000
# Backoff time to wait before trying to determine the leader of a partition that has just lost its leader.
#
refresh.leader.backoff.ms=200
# Zookeeper session timeout. If the consumer fails to heartbeat to zookeeper for this period of time, it is considered
# dead and a rebalance will occur.
#
zookeeper.session.timeout.ms=6000
# The max time that the client waits while establishing a connection to zookeeper.
#
zookeeper.connection.timeout.ms=6000
# How far a ZK follower can be behind a ZK leader.
#
zookeeper.sync.time.ms=2000
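#
# For reference, a minimal sketch (kept as a comment so this file stays a valid properties file) of feeding these
# settings to the old high-level consumer API. The topic name, file path, and surrounding method are hypothetical:
#
#   import java.io.FileInputStream;
#   import java.util.Collections;
#   import java.util.Properties;
#   import kafka.consumer.Consumer;
#   import kafka.consumer.ConsumerConfig;
#   import kafka.consumer.KafkaStream;
#   import kafka.javaapi.consumer.ConsumerConnector;
#   import kafka.message.MessageAndMetadata;
#
#   // Inside a method that declares `throws java.io.IOException`:
#   Properties props = new Properties();
#   props.load(new FileInputStream("consumer-defaults.properties"));
#   ConsumerConnector connector = Consumer.createJavaConsumerConnector(new ConsumerConfig(props));
#   // One stream (consumer thread) for the hypothetical topic "my-topic":
#   KafkaStream<byte[], byte[]> stream =
#       connector.createMessageStreams(Collections.singletonMap("my-topic", 1)).get("my-topic").get(0);
#   for (MessageAndMetadata<byte[], byte[]> msg : stream) {  // blocks forever while consumer.timeout.ms=-1
#       System.out.println(new String(msg.message()));
#   }
#   connector.shutdown();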