[#ftl]
#
##
# KafkaBroker configuration template for Brooklyn
#
# see kafka.server.KafkaConfig for additional details and defaults
##
############################# Server Basics #############################
# The id of the broker. This must be set to a unique integer for each broker.
brokerid=${entity.brokerId?c}
# 0.7 syntax above, 0.8 syntax below
broker.id=${entity.brokerId?c}
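# (the FreeMarker ?c built-in above renders the number in plain computer format,
# e.g. 1234 rather than a locale-formatted 1,234)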
# Hostname the broker will advertise to consumers. If not set, kafka will use the value returned
# from InetAddress.getLocalHost(). If there are multiple interfaces, getLocalHost
# may not return the one you want.
hostname=${driver.hostname}
# 0.7 syntax above, 0.8 syntax below
host.name=${driver.hostname}
# Many of the settings below are 0.7 syntax only (they are the 0.7 defaults; the essential ones have been updated for 0.8)
# TODO should create a new kafka server.properties for 0.8
############################# Socket Server Settings #############################
# The port the socket server listens on
port=${entity.kafkaPort?c}
# The number of processor threads the socket server uses for receiving and answering requests.
# Defaults to the number of cores on the machine
num.threads=8
# The send buffer (SO_SNDBUF) used by the socket server
socket.send.buffer=1048576
# The receive buffer (SO_RCVBUF) used by the socket server
socket.receive.buffer=1048576
# The maximum size of a request that the socket server will accept (protection against OOM)
max.socket.request.bytes=104857600
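# Assumed 0.8 equivalents of the 0.7 socket settings above, left commented out;
# verify the names against kafka.server.KafkaConfig for your version:
#num.network.threads=2
#num.io.threads=8
#socket.send.buffer.bytes=1048576
#socket.receive.buffer.bytes=1048576
#socket.request.max.bytes=104857600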
############################# Log Basics #############################
# The directory under which to store log files
log.dir=${driver.runDir}/kafka-logs
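# 0.8 renames this to log.dirs (a comma-separated list of directories); assuming
# the same run dir, the 0.8 equivalent would be:
#log.dirs=${driver.runDir}/kafka-logs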
# The number of logical partitions per topic per server. More partitions allow greater parallelism
# for consumption, but also mean more files.
num.partitions=1
# Overrides for the default given by num.partitions on a per-topic basis
#topic.partition.count.map=topic1:3, topic2:4
############################# Log Flush Policy #############################
# The following configurations control the flush of data to disk. This is the most
# important performance knob in kafka.
# There are a few important trade-offs here:
# 1. Durability: Unflushed data is at greater risk of loss in the event of a crash.
# 2. Latency: Data is not made available to consumers until it is flushed (which adds latency).
# 3. Throughput: The flush is generally the most expensive operation.
# The settings below allow one to configure the flush policy to flush data after a period of time or
# every N messages (or both). This can be done globally and overridden on a per-topic basis.
# The number of messages to accept before forcing a flush of data to disk
log.flush.interval=10000
# The maximum amount of time a message can sit in a log before we force a flush
log.default.flush.interval.ms=1000
# Per-topic overrides for log.default.flush.interval.ms
#topic.flush.intervals.ms=topic1:1000, topic2:3000
# The interval (in ms) at which logs are checked to see if they need to be flushed to disk.
log.default.flush.scheduler.interval.ms=1000
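# Assumed 0.8 equivalents of the flush settings above, left commented out;
# verify against your Kafka version:
#log.flush.interval.messages=10000
#log.flush.interval.ms=1000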
############################# Log Retention Policy #############################
# The following configurations control the disposal of log segments. The policy can
# be set to delete segments after a period of time, or after a given size has accumulated.
# A segment will be deleted whenever *either* of these criteria is met. Deletion always happens
# from the end of the log.
# The minimum age of a log file to be eligible for deletion
log.retention.hours=168
# A size-based retention policy for logs. Segments are pruned from the oldest end of the log
# as long as the total remaining size exceeds log.retention.size.
#log.retention.size=1073741824
# The maximum size of a log segment file. When this size is reached a new log segment will be created.
log.file.size=536870912
# The interval at which log segments are checked to see if they can be deleted according
# to the retention policies
log.cleanup.interval.mins=1
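# Assumed 0.8 equivalents of the size settings above, left commented out;
# verify against your Kafka version:
#log.retention.bytes=1073741824
#log.segment.bytes=536870912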
############################# Zookeeper #############################
# Enable connecting to zookeeper
enable.zookeeper=true
# Zk connection string (see zk docs for details).
# This is a comma-separated list of host:port pairs, each corresponding to a zk
# server, e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
# You can also append an optional chroot string to the URLs to specify the
# root directory for all kafka znodes.
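# For example, with a chroot of /kafka:
# "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002/kafka"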
zk.connect=${entity.zookeeper.hostname}:${entity.zookeeper.zookeeperPort?c}
# 0.7 syntax above, 0.8 syntax below
zookeeper.connect=${entity.zookeeper.hostname}:${entity.zookeeper.zookeeperPort?c}
# Timeout in ms for connecting to zookeeper
zk.connectiontimeout.ms=1000000
# 0.7 syntax above, 0.8 syntax below
zookeeper.connection.timeout.ms=1000000