######################################
# Akka Cluster Reference Config File #
######################################

# This is the reference config file that contains all the default settings.
# Make your edits/overrides in your application.conf.

akka {

  cluster {
    # Initial contact points of the cluster.
    # The nodes to join automatically at startup.
    # Comma separated full URIs defined by a string on the form of
    # "akka://system@hostname:port"
    # Leave as empty if the node is supposed to be joined manually.
    seed-nodes = []
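    # Example (illustrative only; the system name, host names and port are
    # placeholders) of defining seed nodes in application.conf:
    #   seed-nodes = [
    #     "akka://ClusterSystem@host1:2552",
    #     "akka://ClusterSystem@host2:2552"]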

    # How long to wait for one of the seed nodes to reply to the initial join request.
    # If this node is the first seed node and there is no positive reply from the
    # other seed nodes within this timeout, it will join itself to bootstrap the cluster.
    # If this node is not the first seed node, the join attempts will be retried at
    # this interval.
    seed-node-timeout = 5s

    # If a join request fails it will be retried after this period.
    # Disable join retry by specifying "off".
    retry-unsuccessful-join-after = 10s
    
    # The joining of given seed nodes will by default be retried indefinitely until
    # a successful join. That process can be aborted if unsuccessful by defining this
    # timeout. When aborted it will run CoordinatedShutdown, which by default will
    # terminate the ActorSystem. CoordinatedShutdown can also be configured to exit
    # the JVM. It is useful to define this timeout if the seed-nodes are assembled
    # dynamically and a restart with new seed-nodes should be tried after unsuccessful
    # attempts.   
    shutdown-after-unsuccessful-join-seed-nodes = off
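    # For example (an illustrative value, not a default), to give up and run
    # CoordinatedShutdown if joining has not succeeded within 60 seconds:
    #   shutdown-after-unsuccessful-join-seed-nodes = 60s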

    # Should the 'leader' in the cluster be allowed to automatically mark
    # unreachable nodes as DOWN after a configured time of unreachability?
    # Using auto-down implies that two separate clusters will automatically be
    # formed in case of network partition.
    #
    # Don't enable this in production, see 'Auto-downing (DO NOT USE)' section
    # of Akka Cluster documentation.
    #
    # Disable with "off" or specify a duration to enable auto-down.
    # If a downing-provider-class is configured this setting is ignored.
    auto-down-unreachable-after = off

    # Time margin after which shards or singletons that belonged to a downed/removed
    # partition are created in surviving partition. The purpose of this margin is that
    # in case of a network partition the persistent actors in the non-surviving partitions
    # must be stopped before corresponding persistent actors are started somewhere else.
    # This is useful if you implement downing strategies that handle network partitions,
    # e.g. by keeping the larger side of the partition and shutting down the smaller side.
    # It will not add any extra safety for auto-down-unreachable-after, since that is not
    # handling network partitions.
    # Disable with "off" or specify a duration to enable.
    down-removal-margin = off

    # Pluggable support for downing of nodes in the cluster.
    # If this setting is left empty the behavior will depend on 'auto-down-unreachable-after' in the following ways:
    # * if it is 'off' the `NoDowning` provider is used and no automatic downing will be performed
    # * if it is set to a duration the `AutoDowning` provider is used with the configured downing duration
    #
    # If specified the value must be the fully qualified class name of a subclass of
    # `akka.cluster.DowningProvider` having a public one argument constructor accepting an `ActorSystem`
    downing-provider-class = ""

    # Artery only setting
    # When a node has been gracefully removed, let this time pass (to allow for example
    # cluster singleton handover to complete) and then quarantine the removed node.
    quarantine-removed-node-after = 5s

    # If this is set to "off", the leader will not move 'Joining' members to 'Up' during a network
    # split. This feature allows the leader to accept 'Joining' members to be 'WeaklyUp'
    # so they become part of the cluster even during a network split. The leader will
    # move `Joining` members to 'WeaklyUp' after 3 rounds of 'leader-actions-interval'
    # without convergence.
    # The leader will move 'WeaklyUp' members to 'Up' status once convergence has been reached.
    allow-weakly-up-members = on

    # The roles of this member. List of strings, e.g. roles = ["A", "B"].
    # The roles are part of the membership information and can be used by
    # routers or other services to distribute work to certain member types,
    # e.g. front-end and back-end nodes.
    # Roles are not allowed to start with "dc-" as that is reserved for the
    # special role assigned from the data-center a node belongs to (see the
    # multi-data-center section below)
    roles = []
    
    # Run the coordinated shutdown from phase 'cluster-shutdown' when the cluster
    # is shut down for reasons other than leaving, e.g. when downing. This
    # will terminate the ActorSystem when the cluster extension is shut down.
    run-coordinated-shutdown-when-down = on

    role {
      # Minimum required number of members of a certain role before the leader
      # changes member status of 'Joining' members to 'Up'. Typically used together
      # with 'Cluster.registerOnMemberUp' to defer some action, such as starting
      # actors, until the cluster has reached a certain size.
      # E.g. to require 2 nodes with role 'frontend' and 3 nodes with role 'backend':
      #   frontend.min-nr-of-members = 2
      #   backend.min-nr-of-members = 3
      #.min-nr-of-members = 1
    }

    # Minimum required number of members before the leader changes member status
    # of 'Joining' members to 'Up'. Typically used together with
    # 'Cluster.registerOnMemberUp' to defer some action, such as starting actors,
    # until the cluster has reached a certain size.
    min-nr-of-members = 1
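    # For example (an illustrative value, not a default), to defer moving members
    # to 'Up' until at least 3 nodes have joined:
    #   min-nr-of-members = 3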

    # Enable/disable info level logging of cluster events
    log-info = on

    # Enable/disable verbose info-level logging of cluster events
    # for temporary troubleshooting. Defaults to 'off'.
    log-info-verbose = off

    # Enable or disable JMX MBeans for management of the cluster
    jmx.enabled = on

    # Enable or disable multiple JMX MBeans in the same JVM
    # If this is disabled, the MBean Object name is "akka:type=Cluster"
    # If this is enabled, the MBean Object names become "akka:type=Cluster,port=$clusterPortNumber"
    jmx.multi-mbeans-in-same-jvm = off

    # how long should the node wait before starting the periodic
    # maintenance tasks?
    periodic-tasks-initial-delay = 1s

    # how often should the node send out gossip information?
    gossip-interval = 1s
    
    # discard incoming gossip messages if not handled within this duration
    gossip-time-to-live = 2s

    # how often should the leader perform maintenance tasks?
    leader-actions-interval = 1s

    # how often should the node move nodes, marked as unreachable by the failure
    # detector, out of the membership ring?
    unreachable-nodes-reaper-interval = 1s

    # How often the current internal stats should be published.
    # A value of 0s can be used to publish the stats whenever they are updated.
    # Disable with "off".
    publish-stats-interval = off

    # The id of the dispatcher to use for cluster actors.
    # If specified you need to define the settings of the actual dispatcher.
    use-dispatcher = "akka.actor.internal-dispatcher"

    # Gossip to a random node with newer or older state information, if any, with
    # this probability. Otherwise gossip to any random live node.
    # Probability value is between 0.0 and 1.0. 0.0 means never, 1.0 means always.
    gossip-different-view-probability = 0.8
    
    # Reduce the above probability when the number of nodes in the cluster is
    # greater than this value.
    reduce-gossip-different-view-probability = 400

    # When a node is removed, the removal is marked with a tombstone
    # which is kept at least this long, after which it is pruned. If a network partition
    # lasts longer than this it could lead to removed nodes being re-added to the cluster.
    prune-gossip-tombstones-after = 24h

    # Settings for the Phi accrual failure detector (http://www.jaist.ac.jp/~defago/files/pdf/IS_RR_2004_010.pdf
    # [Hayashibara et al]) used by the cluster subsystem to detect unreachable
    # members.
    # The default PhiAccrualFailureDetector will trigger if there are no heartbeats within
    # the duration heartbeat-interval + acceptable-heartbeat-pause + threshold_adjustment,
    # i.e. around 5.5 seconds with default settings.
    failure-detector {

      # FQCN of the failure detector implementation.
      # It must implement akka.remote.FailureDetector and have
      # a public constructor with a com.typesafe.config.Config and
      # akka.actor.EventStream parameter.
      implementation-class = "akka.remote.PhiAccrualFailureDetector"

      # How often keep-alive heartbeat messages should be sent to each connection.
      heartbeat-interval = 1 s

      # Defines the failure detector threshold.
      # A low threshold is prone to generate many wrong suspicions but ensures
      # a quick detection in the event of a real crash. Conversely, a high
      # threshold generates fewer mistakes but needs more time to detect
      # actual crashes.
      threshold = 8.0

      # Number of samples of inter-heartbeat arrival times used to adaptively
      # calculate the failure timeout for connections.
      max-sample-size = 1000

      # Minimum standard deviation to use for the normal distribution in
      # AccrualFailureDetector. Too low standard deviation might result in
      # too much sensitivity for sudden, but normal, deviations in heartbeat
      # inter arrival times.
      min-std-deviation = 100 ms

      # Number of potentially lost/delayed heartbeats that will be
      # accepted before considering it to be an anomaly.
      # This margin is important to be able to survive sudden, occasional,
      # pauses in heartbeat arrivals, due to for example garbage collection or
      # network drops.
      acceptable-heartbeat-pause = 3 s

      # Number of member nodes that each member will send heartbeat messages to,
      # i.e. each node will be monitored by this number of other nodes.
      monitored-by-nr-of-members = 5
      
      # After the heartbeat request has been sent the first failure detection
      # will start after this period, even though no heartbeat message has
      # been received.
      expected-response-after = 1 s

    }
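    # Example (illustrative values, not defaults) of relaxing the failure detector
    # in application.conf, e.g. for environments with long GC pauses:
    #   akka.cluster.failure-detector {
    #     threshold = 12.0
    #     acceptable-heartbeat-pause = 5 s
    #   }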

    # Configures multi-dc specific heartbeating and other mechanisms.
    # Many of them have a direct counterpart in "one datacenter mode",
    # in which case these settings are not used at all - they only apply
    # if your cluster nodes are configured with at least 2 different
    # `akka.cluster.multi-data-center.self-data-center` values.
    multi-data-center {

      # Defines which data center this node belongs to. It is typically used to make islands of the
      # cluster that are colocated. This can be used to make the cluster aware that it is running
      # across multiple availability zones or regions. It can also be used for other logical
      # grouping of nodes.
      self-data-center = "default"


      # Try to limit the number of connections between data centers. Used for gossip and heartbeating.
      # This will not limit connections created for the messaging of the application.
      # If the cluster does not span multiple data centers, this value has no effect.
      cross-data-center-connections = 5

      # The n oldest nodes in a data center will choose to gossip to another data center with
      # this probability. Must be a value between 0.0 and 1.0 where 0.0 means never, 1.0 means always.
      # When a data center is first started (nodes < 5) a higher probability is used so other data
      # centers find out about the new nodes more quickly
      cross-data-center-gossip-probability = 0.2

      failure-detector {
        # FQCN of the failure detector implementation.
        # It must implement akka.remote.FailureDetector and have
        # a public constructor with a com.typesafe.config.Config and
        # akka.actor.EventStream parameter.
        implementation-class = "akka.remote.DeadlineFailureDetector"
  
        # Number of potentially lost/delayed heartbeats that will be
        # accepted before considering it to be an anomaly.
        # This margin is important to be able to survive sudden, occasional,
        # pauses in heartbeat arrivals, due to for example garbage collection or
        # network drops.
        acceptable-heartbeat-pause = 10 s
        
        # How often keep-alive heartbeat messages should be sent to each connection.
        heartbeat-interval = 3 s
  
        # After the heartbeat request has been sent the first failure detection
        # will start after this period, even though no heartbeat message has
        # been received.
        expected-response-after = 1 s
      }
    }

    # If the tick-duration of the default scheduler is longer than the
    # tick-duration configured here a dedicated scheduler will be used for
    # periodic tasks of the cluster, otherwise the default scheduler is used.
    # See akka.scheduler settings for more details.
    scheduler {
      tick-duration = 33ms
      ticks-per-wheel = 512
    }

    debug {
      # log heartbeat events (very verbose, useful mostly when debugging heartbeating issues)
      verbose-heartbeat-logging = off

      # log verbose details about gossip
      verbose-gossip-logging = off
    }

    configuration-compatibility-check {

      # Enforce configuration compatibility checks when joining a cluster.
      # Set to off to allow joining nodes to join a cluster even when configuration incompatibilities are detected or
      # when the cluster does not support this feature. Compatibility checks are always performed and warning and
      # error messages are logged.
      #
      # This is particularly useful for rolling updates on clusters that do not support that feature. Since the old
      # cluster won't be able to send the compatibility confirmation to the joining node, the joining node won't be able
      # to 'know' if it's allowed to join.
      enforce-on-join = on

      # Add a named entry to this section with the fully qualified class name of the JoinConfigCompatChecker
      # to enable it.
      # Checkers defined in reference.conf can be disabled by the application by using an empty string value
      # for the named entry.
      checkers {
        akka-cluster = "akka.cluster.JoinConfigCompatCheckCluster"
      }
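      # Example (in application.conf) of disabling the checker defined above by
      # overriding it with an empty string:
      #   akka.cluster.configuration-compatibility-check.checkers.akka-cluster = ""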

      # Some configuration properties might not be appropriate to transfer between nodes
      # and such properties can be excluded from the configuration compatibility check by adding
      # the paths of the properties to this list. Sensitive paths are grouped by key. Modules and third-party libraries
      # can define their own set of sensitive paths without clashing with each other (as long as they use unique keys).
      #
      # All properties starting with the paths defined here are excluded, i.e. you can add the path of a whole
      # section here to skip everything inside that section.
      sensitive-config-paths {
        akka = [
          "user.home", "user.name", "user.dir",
          "socksNonProxyHosts", "http.nonProxyHosts", "ftp.nonProxyHosts",
          "akka.remote.secure-cookie",
          "akka.remote.classic.netty.ssl.security",
          # Pre 2.6 path, keep around to avoid sending things misconfigured with old paths
          "akka.remote.netty.ssl.security",
          "akka.remote.artery.ssl"
        ]
      }
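      # Example (key and path are hypothetical placeholders) of a module adding
      # its own group of sensitive paths:
      #   sensitive-config-paths {
      #     my-lib = ["my-lib.credentials"]
      #   }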

    }
  }

  actor.deployment.default.cluster {
    # enable cluster aware router that deploys to nodes in the cluster
    enabled = off

    # Maximum number of routees that will be deployed on each cluster
    # member node.
    # Note that max-total-nr-of-instances defines the total number of routees, but
    # the number of routees per node will not be exceeded, i.e. if you
    # define max-total-nr-of-instances = 50 and max-nr-of-instances-per-node = 2
    # it will deploy 2 routees per new member in the cluster, up to
    # 25 members.
    max-nr-of-instances-per-node = 1
    
    # Maximum number of routees that will be deployed, in total
    # on all nodes. See also description of max-nr-of-instances-per-node.
    # For backwards compatibility reasons, nr-of-instances
    # has the same purpose as max-total-nr-of-instances for cluster
    # aware routers and nr-of-instances (if defined by user) takes
    # precedence over max-total-nr-of-instances. 
    max-total-nr-of-instances = 10000

    # Defines if routees are allowed to be located on the same node as
    # the head router actor, or only on remote nodes.
    # Useful for master-worker scenario where all routees are remote.
    allow-local-routees = on

    # Use members with all specified roles, or all members if undefined or empty.
    use-roles = []

    # Deprecated, since Akka 2.5.4, replaced by use-roles
    # Use members with specified role, or all members if undefined or empty.
    use-role = ""
  }
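  # Example (illustrative only; the router path, pool type and role are
  # placeholders) of deploying a cluster-aware router pool from application.conf,
  # with the defaults above as fallback:
  #   akka.actor.deployment {
  #     /workerRouter {
  #       router = round-robin-pool
  #       cluster {
  #         enabled = on
  #         max-nr-of-instances-per-node = 2
  #         allow-local-routees = off
  #         use-roles = ["backend"]
  #       }
  #     }
  #   }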

  # Protobuf serializer for cluster messages
  actor {
    serializers {
      akka-cluster = "akka.cluster.protobuf.ClusterMessageSerializer"
    }

    serialization-bindings {
      "akka.cluster.ClusterMessage" = akka-cluster
      "akka.cluster.routing.ClusterRouterPool" = akka-cluster
    }
    
    serialization-identifiers {
      "akka.cluster.protobuf.ClusterMessageSerializer" = 5
    }
    
  }

}
