# singlenode.factory-akka-default.conf

bounded-mailbox {
  mailbox-type = "org.opendaylight.controller.cluster.common.actor.MeteredBoundedMailbox"
  mailbox-capacity = 5000
  mailbox-push-timeout-time = 10ms
}
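
# Illustrative sketch of how an actor opts into this mailbox by name (MyActor is a
# hypothetical class used only for the example):
#   Props props = Props.create(MyActor.class).withMailbox("bounded-mailbox");
#   ActorRef ref = system.actorOf(props, "my-actor");
# With a bounded mailbox, a send to a full mailbox waits up to mailbox-push-timeout-time
# (10ms here) before the message is routed to dead letters.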

metric-capture-enabled = true

notification-dispatcher {
  # Dispatcher is the name of the event-based dispatcher
  type = Dispatcher
  # What kind of ExecutorService to use
  executor = "fork-join-executor"
  # Configuration for the fork join pool
  fork-join-executor {
    # Min number of threads to cap factor-based parallelism number to
    parallelism-min = 2
    # Parallelism (threads) ... ceil(available processors * factor)
    parallelism-factor = 3.0
    # Max number of threads to cap factor-based parallelism number to
    parallelism-max = 15
  }
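  # Worked example for the pool above (illustrative): on an 8-core host the factor gives
  # ceil(8 * 3.0) = 24 threads, which parallelism-max caps at 15; on a 4-core host
  # ceil(4 * 3.0) = 12 falls within the [2, 15] bounds and is used as-is.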
  # Throughput defines the maximum number of messages to be
  # processed per actor before the thread jumps to the next actor.
  # Set to 1 for as fair as possible.
  throughput = 1
}
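
# Illustrative sketch of how a dispatcher defined in this file is selected when creating
# an actor (NotificationActor is a hypothetical class used only for the example):
#   Props props = Props.create(NotificationActor.class)
#       .withDispatcher("notification-dispatcher");
#   ActorRef ref = system.actorOf(props, "notifier");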

serialization-dispatcher {
  type = Dispatcher
  executor = "fork-join-executor"
  fork-join-executor {
    # Min number of threads to cap factor-based parallelism number to
    parallelism-min = 2
    # Parallelism (threads) ... ceil(available processors * factor)
    parallelism-factor = 2.0
    # Max number of threads to cap factor-based parallelism number to
    parallelism-max = 15
  }
  throughput = 1
}

shard-dispatcher {
  type = Dispatcher
  executor = "default-executor"

  # We use a ControlAwareMailbox so that raft messages that implement ControlMessage
  # are given higher priority.
  mailbox-type = "org.opendaylight.controller.cluster.common.actor.UnboundedDequeBasedControlAwareMailbox"
}
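
# Illustrative sketch: messages gain priority in this mailbox by implementing the
# akka.dispatch.ControlMessage marker interface, e.g. a hypothetical
#   public final class LeaderHeartbeat implements akka.dispatch.ControlMessage {}
# would be dequeued ahead of ordinary messages queued in the same mailbox.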

akka {
  loglevel = "INFO"
  loggers = ["akka.event.slf4j.Slf4jLogger"]
  logger-startup-timeout = 300s

  # JFR requires boot delegation, which we do not have by default
  java-flight-recorder {
    enabled = false
  }

  actor {
    warn-about-java-serializer-usage = off
    provider = "akka.cluster.ClusterActorRefProvider"
    serializers {
      java = "akka.serialization.JavaSerializer"
      proto = "akka.remote.serialization.ProtobufSerializer"
      readylocal = "org.opendaylight.controller.cluster.datastore.messages.ReadyLocalTransactionSerializer"
      simpleReplicatedLogEntry = "org.opendaylight.controller.cluster.raft.persisted.SimpleReplicatedLogEntrySerializer"
    }

    serialization-bindings {
      "com.google.protobuf.Message" = proto
      "org.opendaylight.controller.cluster.datastore.messages.ReadyLocalTransaction" = readylocal
      "org.opendaylight.controller.cluster.raft.persisted.SimpleReplicatedLogEntry" = simpleReplicatedLogEntry
    }
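    # Illustrative sketch: the bindings above are applied through Akka's serialization
    # extension, e.g. in Java:
    #   Serialization serialization = SerializationExtension.get(system);
    #   byte[] bytes = serialization.serialize(message).get();
    # A message implementing com.google.protobuf.Message would be handled by the "proto"
    # serializer configured above.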

    default-dispatcher {
      # Setting throughput to 1 makes the dispatcher fair. It processes 1 message from
      # the mailbox before moving on to the next mailbox.
      throughput = 1
    }

    default-mailbox {
      # When not using a BalancingDispatcher it is recommended that we use the SingleConsumerOnlyUnboundedMailbox
      # as it is the most efficient for multiple producer/single consumer use cases
      mailbox-type = "akka.dispatch.SingleConsumerOnlyUnboundedMailbox"
    }
  }
  remote {
    log-remote-lifecycle-events = off
    # Disable passive connections, as we are seeing issues
    # with read-only associations
    use-passive-connections = off

    classic.netty.tcp {
      maximum-frame-size = 419430400  # 400 MiB
      send-buffer-size = 52428800     # 50 MiB
      receive-buffer-size = 52428800  # 50 MiB
    }

    artery {
      enabled = on
      transport = tcp

      advanced {
        maximum-frame-size = 512 KiB
        maximum-large-frame-size = 2 MiB
      }
    }
  }

  cluster {
    seed-node-timeout = 12s

    # The following is paraphrased from the Akka Cluster documentation
    # (http://doc.akka.io/docs/akka/snapshot/java/cluster-usage.html):
    # Akka recommends against using the auto-down feature of Akka Cluster in production.
    # Avoiding it is crucial for correct behavior if you use Cluster Singleton or
    # Cluster Sharding, especially together with Akka Persistence.

    #auto-down-unreachable-after = 30s

    allow-weakly-up-members = on

    use-dispatcher = cluster-dispatcher

    failure-detector.acceptable-heartbeat-pause = 3 s

    distributed-data {
      # How often the Replicator should send out gossip information.
      # This value controls how quickly Entity Ownership Service data is replicated
      # across cluster nodes.
      gossip-interval = 100 ms

      # How often the subscribers will be notified of changes, if any.
      # This value controls how quickly Entity Ownership Service decisions are
      # propagated within a node.
      notify-subscribers-interval = 20 ms
    }

    downing-provider-class = "akka.cluster.sbr.SplitBrainResolverProvider"

    split-brain-resolver {
      active-strategy = keep-majority
      stable-after = 7s
    }
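    # Illustrative example of keep-majority: if a 5-node cluster splits 3/2 and the
    # partition stays stable for 7s, the 3-node side remains up and downs the two
    # unreachable members, while the minority side downs itself.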
  }

  persistence {
    journal {
      # The following activates the default segmented file journal. Each persistent actor
      # is stored in a separate directory, with multiple segment files. Segments are removed
      # when they are no longer required.
      #
      plugin = akka.persistence.journal.segmented-file

      segmented-file {
        class = "org.opendaylight.controller.akka.segjournal.SegmentedFileJournal"
        # Root directory for segmented journal storage
        root-directory = "target/segmented-journal"
        # Maximum size of a single entry in the segmented journal
        max-entry-size = 16M
        # Maximum size of a segment
        max-segment-size = 128M
        # Maximum number of bytes that are written without synchronizing storage. Defaults to max-entry-size.
        # Set to <= 0 to flush immediately.
        #max-unflushed-bytes = 1M
        # Map each segment into memory. Defaults to true, use false to keep a heap-based
        # buffer instead.
        memory-mapped = true
      }
    }

    # Journal configuration for shards that have persistence turned off. They still need a journal plugin
    # configured, since they occasionally store entries in the journal, but larger segment sizes
    # would be wasteful.
    non-persistent {
      journal {
        class = "org.opendaylight.controller.akka.segjournal.SegmentedFileJournal"
        # Root directory for segmented journal storage
        root-directory = "segmented-journal"
        # Maximum size of a single entry in the segmented journal
        max-entry-size = 512K
        # Maximum size of a segment
        max-segment-size = 1M
        # Maximum number of bytes that are written without synchronizing storage. Defaults to max-entry-size.
        # Set to <= 0 to flush immediately.
        #max-unflushed-bytes = 128K
        # Map each segment into memory. Note that while this can improve performance,
        # it will also place additional burden on system resources.
        memory-mapped = false
      }
    }

    snapshot-store.local.class = "org.opendaylight.controller.cluster.persistence.LocalSnapshotStore"
    snapshot-store.plugin = akka.persistence.snapshot-store.local
  }
}

cluster-dispatcher {
  type = "Dispatcher"
  executor = "fork-join-executor"
  fork-join-executor {
    parallelism-min = 2
    parallelism-max = 4
  }
}
