akka-cluster_2.12 2.6.12 source code: reference.conf
Akka is a toolkit for building highly concurrent, distributed, and resilient message-driven applications for Java and Scala.
######################################
# Akka Cluster Reference Config File #
######################################
# This is the reference config file that contains all the default settings.
# Make your edits/overrides in your application.conf.
akka {
cluster {
# Initial contact points of the cluster.
# The nodes to join automatically at startup.
# Comma separated full URIs defined by a string in the form of
# "akka://system@hostname:port"
# Leave as empty if the node is supposed to be joined manually.
seed-nodes = []
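#
# For example (hypothetical actor system name, hosts and ports), the seed nodes
# could be listed in application.conf as:
# seed-nodes = [
#   "akka://ClusterSystem@host1:2552",
#   "akka://ClusterSystem@host2:2552"]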
# How long to wait for one of the seed nodes to reply to initial join request.
# When this is the first seed node and there is no positive reply from the other
# seed nodes within this timeout it will join itself to bootstrap the cluster.
# When this is not the first seed node the join attempts will be performed with
# this interval.
seed-node-timeout = 5s
# If a join request fails it will be retried after this period.
# Disable join retry by specifying "off".
retry-unsuccessful-join-after = 10s
# The joining of given seed nodes will by default be retried indefinitely until
# a successful join. That process can be aborted if unsuccessful by defining this
# timeout. When aborted it will run CoordinatedShutdown, which by default will
# terminate the ActorSystem. CoordinatedShutdown can also be configured to exit
# the JVM. It is useful to define this timeout if the seed-nodes are assembled
# dynamically and a restart with new seed-nodes should be tried after unsuccessful
# attempts.
shutdown-after-unsuccessful-join-seed-nodes = off
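#
# For example, to give up joining after one minute and let CoordinatedShutdown
# exit the JVM (the duration is only an illustrative value), application.conf
# could contain:
# akka.cluster.shutdown-after-unsuccessful-join-seed-nodes = 60s
# akka.coordinated-shutdown.exit-jvm = on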
# Time margin after which shards or singletons that belonged to a downed/removed
# partition are created in surviving partition. The purpose of this margin is that
# in case of a network partition the persistent actors in the non-surviving partitions
# must be stopped before corresponding persistent actors are started somewhere else.
# This is useful if you implement downing strategies that handle network partitions,
# e.g. by keeping the larger side of the partition and shutting down the smaller side.
# Disable with "off" or specify a duration to enable.
#
# When using the `akka.cluster.sbr.SplitBrainResolver` as downing provider it will use
# the akka.cluster.split-brain-resolver.stable-after as the default down-removal-margin
# if this down-removal-margin is undefined.
down-removal-margin = off
# Pluggable support for downing of nodes in the cluster.
# If this setting is left empty the `NoDowning` provider is used and no automatic downing will be performed.
#
# If specified the value must be the fully qualified class name of a subclass of
# `akka.cluster.DowningProvider` having a public one argument constructor accepting an `ActorSystem`
downing-provider-class = ""
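#
# For example, to use the bundled split brain resolver (configured in the
# akka.cluster.split-brain-resolver section further down) you could set:
# downing-provider-class = "akka.cluster.sbr.SplitBrainResolverProvider"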
# Artery only setting
# When a node has been gracefully removed, let this time pass (to allow for example
# cluster singleton handover to complete) and then quarantine the removed node.
quarantine-removed-node-after = 5s
# If this is set to "off", the leader will not move 'Joining' members to 'Up' during a network
# split. This feature allows the leader to accept 'Joining' members to be 'WeaklyUp'
# so they become part of the cluster even during a network split. The leader will
# move `Joining` members to 'WeaklyUp' after this configured duration without convergence.
# The leader will move 'WeaklyUp' members to 'Up' status once convergence has been reached.
allow-weakly-up-members = 7s
# The roles of this member. List of strings, e.g. roles = ["A", "B"].
# The roles are part of the membership information and can be used by
# routers or other services to distribute work to certain member types,
# e.g. front-end and back-end nodes.
# Roles are not allowed to start with "dc-" as that is reserved for the
# special role assigned from the data-center a node belongs to (see the
# multi-data-center section below)
roles = []
# Run the coordinated shutdown from phase 'cluster-shutdown' when the cluster
# is shutdown for other reasons than when leaving, e.g. when downing. This
# will terminate the ActorSystem when the cluster extension is shutdown.
run-coordinated-shutdown-when-down = on
role {
# Minimum required number of members of a certain role before the leader
# changes member status of 'Joining' members to 'Up'. Typically used together
# with 'Cluster.registerOnMemberUp' to defer some action, such as starting
# actors, until the cluster has reached a certain size.
# E.g. to require 2 nodes with role 'frontend' and 3 nodes with role 'backend':
# frontend.min-nr-of-members = 2
# backend.min-nr-of-members = 3
#<role-name>.min-nr-of-members = 1
}
# Application version of the deployment. Used by rolling update features
# to distinguish between old and new nodes. The typical convention is to use
# 3 digit version numbers `major.minor.patch`, but one or two digits are also
# supported.
#
# If no `.` is used it is interpreted as a single digit version number or as
# plain alphanumeric if it couldn't be parsed as a number.
#
# It may also have a qualifier at the end for 2 or 3 digit version numbers such
# as "1.2-RC1".
# For 1 digit with qualifier, 1-RC1, it is interpreted as plain alphanumeric.
#
# It has support for https://github.com/dwijnand/sbt-dynver format with `+` or
# `-` separator. The number of commits from the tag is handled as a numeric part.
# For example `1.0.0+3-73475dce26` is less than `1.0.10+10-ed316bd024` (3 < 10).
app-version = "0.0.0"
# Minimum required number of members before the leader changes member status
# of 'Joining' members to 'Up'. Typically used together with
# 'Cluster.registerOnMemberUp' to defer some action, such as starting actors,
# until the cluster has reached a certain size.
min-nr-of-members = 1
# Enable/disable info level logging of cluster events.
# These are logged with logger name `akka.cluster.Cluster`.
log-info = on
# Enable/disable verbose info-level logging of cluster events
# for temporary troubleshooting. Defaults to 'off'.
# These are logged with logger name `akka.cluster.Cluster`.
log-info-verbose = off
# Enable or disable JMX MBeans for management of the cluster
jmx.enabled = on
# Enable or disable multiple JMX MBeans in the same JVM
# If this is disabled, the MBean Object name is "akka:type=Cluster"
# If this is enabled, the MBean Object names become "akka:type=Cluster,port=$clusterPortNumber"
jmx.multi-mbeans-in-same-jvm = off
# how long should the node wait before starting the periodic
# maintenance tasks?
periodic-tasks-initial-delay = 1s
# how often should the node send out gossip information?
gossip-interval = 1s
# discard incoming gossip messages if not handled within this duration
gossip-time-to-live = 2s
# how often should the leader perform maintenance tasks?
leader-actions-interval = 1s
# how often should the node move nodes, marked as unreachable by the failure
# detector, out of the membership ring?
unreachable-nodes-reaper-interval = 1s
# How often the current internal stats should be published.
# A value of 0s can be used to always publish the stats, when it happens.
# Disable with "off".
publish-stats-interval = off
# The id of the dispatcher to use for cluster actors.
# If specified you need to define the settings of the actual dispatcher.
use-dispatcher = "akka.actor.internal-dispatcher"
# Gossip to random node with newer or older state information, if any with
# this probability. Otherwise Gossip to any random live node.
# Probability value is between 0.0 and 1.0. 0.0 means never, 1.0 means always.
gossip-different-view-probability = 0.8
# Reduce the above probability when the number of nodes in the cluster
# is greater than this value.
reduce-gossip-different-view-probability = 400
# When a node is removed the removal is marked with a tombstone,
# which is kept at least this long before it is pruned. If a partition lasts
# longer than this it could lead to removed nodes being re-added to the cluster.
prune-gossip-tombstones-after = 24h
# Settings for the Phi accrual failure detector (http://www.jaist.ac.jp/~defago/files/pdf/IS_RR_2004_010.pdf
# [Hayashibara et al]) used by the cluster subsystem to detect unreachable
# members.
# The default PhiAccrualFailureDetector will trigger if there are no heartbeats within
# the duration heartbeat-interval + acceptable-heartbeat-pause + threshold_adjustment,
# i.e. around 5.5 seconds with default settings.
failure-detector {
# FQCN of the failure detector implementation.
# It must implement akka.remote.FailureDetector and have
# a public constructor with a com.typesafe.config.Config and
# akka.actor.EventStream parameter.
implementation-class = "akka.remote.PhiAccrualFailureDetector"
# How often keep-alive heartbeat messages should be sent to each connection.
heartbeat-interval = 1 s
# Defines the failure detector threshold.
# A low threshold is prone to generate many wrong suspicions but ensures
# a quick detection in the event of a real crash. Conversely, a high
# threshold generates fewer mistakes but needs more time to detect
# actual crashes.
threshold = 8.0
# Number of the samples of inter-heartbeat arrival times to adaptively
# calculate the failure timeout for connections.
max-sample-size = 1000
# Minimum standard deviation to use for the normal distribution in
# AccrualFailureDetector. Too low standard deviation might result in
# too much sensitivity for sudden, but normal, deviations in heartbeat
# inter arrival times.
min-std-deviation = 100 ms
# Number of potentially lost/delayed heartbeats that will be
# accepted before considering it to be an anomaly.
# This margin is important to be able to survive sudden, occasional,
# pauses in heartbeat arrivals, due to for example garbage collect or
# network drop.
acceptable-heartbeat-pause = 3 s
# Number of member nodes that each member will send heartbeat messages to,
# i.e. each node will be monitored by this number of other nodes.
monitored-by-nr-of-members = 9
# After the heartbeat request has been sent the first failure detection
# will start after this period, even though no heartbeat message has
# been received.
expected-response-after = 1 s
}
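# For example, in an environment with long GC pauses or a flaky network it may
# make sense to relax the failure detector; the values below are only an
# illustrative sketch, not a recommendation:
# akka.cluster.failure-detector {
#   acceptable-heartbeat-pause = 7s
#   threshold = 12.0
# }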
# Configures multi-dc specific heartbeating and other mechanisms.
# Many of them have a direct counterpart in "one datacenter mode",
# in which case these settings would not be used at all - they only apply
# if your cluster nodes are configured with at least 2 different `akka.cluster.data-center` values.
multi-data-center {
# Defines which data center this node belongs to. It is typically used to make islands of the
# cluster that are colocated. This can be used to make the cluster aware that it is running
# across multiple availability zones or regions. It can also be used for other logical
# grouping of nodes.
self-data-center = "default"
# Try to limit the number of connections between data centers. Used for gossip and heartbeating.
# This will not limit connections created for the messaging of the application.
# If the cluster does not span multiple data centers, this value has no effect.
cross-data-center-connections = 5
# The n oldest nodes in a data center will choose to gossip to another data center with
# this probability. Must be a value between 0.0 and 1.0 where 0.0 means never, 1.0 means always.
# When a data center is first started (nodes < 5) a higher probability is used so other data
# centers find out about the new nodes more quickly
cross-data-center-gossip-probability = 0.2
failure-detector {
# FQCN of the failure detector implementation.
# It must implement akka.remote.FailureDetector and have
# a public constructor with a com.typesafe.config.Config and
# akka.actor.EventStream parameter.
implementation-class = "akka.remote.DeadlineFailureDetector"
# Number of potentially lost/delayed heartbeats that will be
# accepted before considering it to be an anomaly.
# This margin is important to be able to survive sudden, occasional,
# pauses in heartbeat arrivals, due to for example garbage collect or
# network drop.
acceptable-heartbeat-pause = 10 s
# How often keep-alive heartbeat messages should be sent to each connection.
heartbeat-interval = 3 s
# After the heartbeat request has been sent the first failure detection
# will start after this period, even though no heartbeat message has
# been received.
expected-response-after = 1 s
}
}
# If the tick-duration of the default scheduler is longer than the
# tick-duration configured here a dedicated scheduler will be used for
# periodic tasks of the cluster, otherwise the default scheduler is used.
# See akka.scheduler settings for more details.
scheduler {
tick-duration = 33ms
ticks-per-wheel = 512
}
debug {
# Log heartbeat events (very verbose, useful mostly when debugging heartbeating issues).
# These are logged with logger name `akka.cluster.ClusterHeartbeat`.
verbose-heartbeat-logging = off
# log verbose details about gossip
verbose-gossip-logging = off
}
configuration-compatibility-check {
# Enforce configuration compatibility checks when joining a cluster.
# Set to off to allow joining nodes to join a cluster even when configuration incompatibilities are detected or
# when the cluster does not support this feature. Compatibility checks are always performed and warning and
# error messages are logged.
#
# This is particularly useful for rolling updates on clusters that do not support that feature. Since the old
# cluster won't be able to send the compatibility confirmation to the joining node, the joining node won't be able
# to 'know' if it's allowed to join.
enforce-on-join = on
# Add named entry to this section with fully qualified class name of the JoinConfigCompatChecker
# to enable.
# Checkers defined in reference.conf can be disabled by application by using empty string value
# for the named entry.
checkers {
akka-cluster = "akka.cluster.JoinConfigCompatCheckCluster"
}
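#
# For example, the built-in checker above could be disabled from application.conf
# by overriding the named entry with an empty string:
# akka.cluster.configuration-compatibility-check.checkers.akka-cluster = ""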
# Some configuration properties might not be appropriate to transfer between nodes
# and such properties can be excluded from the configuration compatibility check by adding
# the paths of the properties to this list. Sensitive paths are grouped by key. Modules and third-party libraries
# can define their own set of sensitive paths without clashing with each other (as long they use unique keys).
#
# All properties starting with the paths defined here are excluded, i.e. you can add the path of a whole
# section here to skip everything inside that section.
sensitive-config-paths {
akka = [
"user.home", "user.name", "user.dir",
"socksNonProxyHosts", "http.nonProxyHosts", "ftp.nonProxyHosts",
"akka.remote.secure-cookie",
"akka.remote.classic.netty.ssl.security",
# Pre 2.6 path, keep around to avoid sending things misconfigured with old paths
"akka.remote.netty.ssl.security",
"akka.remote.artery.ssl"
]
}
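#
# For example, an application could exclude its own secret settings from the check
# by adding an entry under a unique key (hypothetical key and path):
# akka.cluster.configuration-compatibility-check.sensitive-config-paths {
#   my-app = ["my-app.credentials"]
# }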
}
}
actor.deployment.default.cluster {
# enable cluster aware router that deploys to nodes in the cluster
enabled = off
# Maximum number of routees that will be deployed on each cluster
# member node.
# Note that max-total-nr-of-instances defines the total number of routees, but
# number of routees per node will not be exceeded, i.e. if you
# define max-total-nr-of-instances = 50 and max-nr-of-instances-per-node = 2
# it will deploy 2 routees per new member in the cluster, up to
# 25 members.
max-nr-of-instances-per-node = 1
# Maximum number of routees that will be deployed, in total
# on all nodes. See also description of max-nr-of-instances-per-node.
# For backwards compatibility reasons, nr-of-instances
# has the same purpose as max-total-nr-of-instances for cluster
# aware routers and nr-of-instances (if defined by user) takes
# precedence over max-total-nr-of-instances.
max-total-nr-of-instances = 10000
# Defines if routees are allowed to be located on the same node as
# the head router actor, or only on remote nodes.
# Useful for master-worker scenario where all routees are remote.
allow-local-routees = on
# Use members with all specified roles, or all members if undefined or empty.
use-roles = []
# Deprecated, since Akka 2.5.4, replaced by use-roles
# Use members with specified role, or all members if undefined or empty.
use-role = ""
}
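# A cluster aware router is typically enabled per deployment path. For example
# (hypothetical actor path and role name):
# akka.actor.deployment {
#   /workerRouter {
#     router = round-robin-pool
#     cluster {
#       enabled = on
#       max-nr-of-instances-per-node = 2
#       allow-local-routees = off
#       use-roles = ["backend"]
#     }
#   }
# }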
# Protobuf serializer for cluster messages
actor {
serializers {
akka-cluster = "akka.cluster.protobuf.ClusterMessageSerializer"
}
serialization-bindings {
"akka.cluster.ClusterMessage" = akka-cluster
"akka.cluster.routing.ClusterRouterPool" = akka-cluster
}
serialization-identifiers {
"akka.cluster.protobuf.ClusterMessageSerializer" = 5
}
}
}
#//#split-brain-resolver
# To enable the split brain resolver you first need to enable the provider in your application.conf:
# akka.cluster.downing-provider-class = "akka.cluster.sbr.SplitBrainResolverProvider"
akka.cluster.split-brain-resolver {
# Select one of the available strategies (see descriptions below):
# static-quorum, keep-majority, keep-oldest, down-all, lease-majority
active-strategy = keep-majority
#//#stable-after
# Time margin after which shards or singletons that belonged to a downed/removed
# partition are created in surviving partition. The purpose of this margin is that
# in case of a network partition the persistent actors in the non-surviving partitions
# must be stopped before corresponding persistent actors are started somewhere else.
# This is useful if you implement downing strategies that handle network partitions,
# e.g. by keeping the larger side of the partition and shutting down the smaller side.
# The decision is taken by the strategy when there have been no membership or
# reachability changes for this duration, i.e. the cluster state is stable.
stable-after = 20s
#//#stable-after
# When reachability observations by the failure detector are changed the SBR decisions
# are deferred until there are no changes within the 'stable-after' duration.
# If this continues for too long it might be an indication of an unstable system/network
# and it could result in delayed or conflicting decisions on separate sides of a network
# partition.
# As a precaution for that scenario all nodes are downed if no decision is made within
# `stable-after + down-all-when-unstable` from the first unreachability event.
# The measurement is reset if all unreachable have been healed, downed or removed, or
# if there are no changes within `stable-after * 2`.
# The value can be on, off, or a duration.
# By default it is 'on' and then it is derived to be 3/4 of stable-after, but not less than
# 4 seconds.
down-all-when-unstable = on
}
#//#split-brain-resolver
# Down the unreachable nodes if the number of remaining nodes is greater than or equal to
# the given 'quorum-size'. Otherwise down the reachable nodes, i.e. it will shut down that
# side of the partition. In other words, the 'size' defines the minimum number of nodes
# that the cluster must have to be operational. If there are unreachable nodes when starting
# up the cluster, before reaching this limit, the cluster may shut itself down immediately.
# This is not an issue if you start all nodes at approximately the same time.
#
# Note that you must not add more members to the cluster than 'quorum-size * 2 - 1', because
# then both sides may down each other and thereby form two separate clusters. For example,
# quorum-size configured to 3 in a 6 node cluster may result in a split where each side
# consists of 3 nodes each, i.e. each side thinks it has enough nodes to continue by
# itself. A warning is logged if this recommendation is violated.
#//#static-quorum
akka.cluster.split-brain-resolver.static-quorum {
# minimum number of nodes that the cluster must have
quorum-size = undefined
# if the 'role' is defined the decision is based only on members with that 'role'
role = ""
}
#//#static-quorum
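# For example, a cluster planned to consist of 5 nodes could use a quorum of 3, so
# that the side with at least 3 reachable nodes survives (illustrative size only):
# akka.cluster.split-brain-resolver {
#   active-strategy = static-quorum
#   static-quorum.quorum-size = 3
# }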
# Down the unreachable nodes if the current node is in the majority part based on the last known
# membership information. Otherwise down the reachable nodes, i.e. the own part. If the
# parts are of equal size the part containing the node with the lowest address is kept.
# Note that if there are more than two partitions and none is in majority each part
# will shut itself down, terminating the whole cluster.
#//#keep-majority
akka.cluster.split-brain-resolver.keep-majority {
# if the 'role' is defined the decision is based only on members with that 'role'
role = ""
}
#//#keep-majority
# Down the part that does not contain the oldest member (current singleton).
#
# There is one exception to this rule if 'down-if-alone' is defined to 'on'.
# Then, if the oldest node has partitioned from all other nodes the oldest
# will down itself and keep all other nodes running. The strategy will not
# down the single oldest node when it is the only remaining node in the cluster.
#
# Note that if the oldest node crashes the others will remove it from the cluster
# when 'down-if-alone' is 'on', otherwise they will down themselves if the
# oldest node crashes, i.e. shut down the whole cluster together with the oldest node.
#//#keep-oldest
akka.cluster.split-brain-resolver.keep-oldest {
# Enable downing of the oldest node when it is partitioned from all other nodes
down-if-alone = on
# if the 'role' is defined the decision is based only on members with that 'role',
# i.e. using the oldest member (singleton) within the nodes with that role
role = ""
}
#//#keep-oldest
# Keep the part that can acquire the lease, and down the other part.
# Best effort is to keep the side that has the most nodes, i.e. the majority side.
# This is achieved by adding a delay before trying to acquire the lease on the
# minority side.
#//#lease-majority
akka.cluster.split-brain-resolver.lease-majority {
lease-implementation = ""
# This delay is used on the minority side before trying to acquire the lease,
# as a best effort to keep the majority side.
acquire-lease-delay-for-minority = 2s
# If the 'role' is defined the majority/minority is based only on members with that 'role'.
role = ""
}
#//#lease-majority
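# For example, on Kubernetes this strategy could be combined with a Kubernetes
# backed lease (assumes the Akka Coordination Kubernetes lease module is on the
# classpath; configuration sketch only):
# akka.cluster.split-brain-resolver {
#   active-strategy = lease-majority
#   lease-majority.lease-implementation = "akka.coordination.lease.kubernetes"
# }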