####################################
#   Cache Simulator Config File    #
####################################

# This is the reference config file that contains all the default settings.
# Make your edits/overrides in your application.conf.
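#
# A possible application.conf override might look like this (illustrative sketch;
# the keys mirror the defaults defined below, and the trace path is hypothetical):
#
#   caffeine.simulator {
#     maximum-size = 1024
#     policies = [ "linked.Lru", "sketch.WindowTinyLfu" ]
#     files.paths = [ "/path/to/trace.gz" ]
#   }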

# See http://doc.akka.io/docs/akka/current/general/configuration.html
akka {
  loglevel = "WARNING"

  default-mailbox {
    mailbox-capacity = 50
    mailbox-type = "akka.dispatch.BoundedMailbox"
  }
}
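
# For more verbose simulator output, an application.conf override could lower the
# Akka logging threshold (illustrative; the standard Akka levels are DEBUG, INFO,
# WARNING, and ERROR):
#   akka.loglevel = "INFO"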

caffeine.simulator {
  report {
    # Formats: table, csv
    format = "table"

    # Columns: policy, hit rate, hits, misses, evictions, steps, time
    sort-by = "policy"
    ascending = true

    # The output destination, either the console or a file path
    output = "console"
  }
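
  # For example (illustrative), an override could emit a CSV report sorted by hit
  # rate to a file instead of the console:
  #   report {
  #     format = "csv"
  #     sort-by = "hit rate"
  #     output = "results.csv"   # hypothetical file path
  #   }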

  # The seed for randomized operations
  random-seed = "1033096058"

  # The number of events to send per actor message
  batch-size = 1000

  # The maximum number of entries in the cache
  maximum-size = 512

  policies = [
    # Policies that provide an optimal upper bound
    "opt.Unbounded",
    "opt.Clairvoyant",

    # Policies based on maintaining a linked list that cross-cuts the hash table
    "linked.Lru",
    "linked.Mru",
    "linked.Lfu",
    "linked.Mfu",
    "linked.Fifo",
    "linked.Clock",
    "linked.S4Lru",
    "linked.MultiQueue",
    "linked.SegmentedLru",

    # Policies based on obtaining a random sampling from the hash table
    "sampled.Lru",
    "sampled.Mru",
    "sampled.Lfu",
    "sampled.Mfu",
    "sampled.Fifo",
    "sampled.Random",

    # Policies based on the 2Q algorithm
    "two-queue.TwoQueue",
    "two-queue.TuQueue",

    # Policies based on a sketch algorithm
    "sketch.WindowTinyLfu",
    "sketch.S4WindowTinyLfu",
    "sketch.SimpleWindowTinyLfu",
    "sketch.RandomWindowTinyLfu",
    "sketch.FullySegmentedWindowTinyLfu",

    "sketch.TinyCache",
    "sketch.TinyCache_GhostCache",
    "sketch.WindowTinyCache",

    # Policies based on the LIRS algorithm
    "irr.Lirs",
    "irr.ClockPro",

    # Policies based on the ARC algorithm
    "adaptive.Arc",
    "adaptive.Car",
    "adaptive.Cart",

    # Caching products
    "product.Guava",
    "product.TCache",
    "product.Cache2k",
    "product.Caffeine",
    "product.Ehcache2",
    "product.Ehcache3",
    "product.Infinispan",
  ]

  # The admission policies, which decide whether a candidate entry may be added
  # to the cache (the counterpart of the eviction policy)
  admission = [
    "Always",
    "TinyLfu",
  ]

  sampling {
    # The random sample size
    size = 8

    # guess: Chooses items at random until the sample size is reached
    # shuffle: http://en.wikipedia.org/wiki/Fisher–Yates_shuffle
    # reservoir: http://en.wikipedia.org/wiki/Reservoir_sampling
    strategy = "guess"
  }

  multi-queue {
    # The logical time that an entry can reside idle in a queue before being demoted
    lifetime = 75
    # The number of queues using a 2^n frequency distribution
    num-queues = 8
    # The percentage for the OUT queue
    percent-out = "0.50"
  }

  segmented-lru {
    # The percentage for the PROTECTED queue
    percent-protected = "0.80"
  }

  s4lru {
    # The number of segments
    levels = 4
  }

  two-queue {
    # The percentage for the IN queue
    percent-in = "0.20"
    # The percentage for the OUT queue
    percent-out = "0.50"
  }

  tu-queue {
    # The percentage for the HOT queue
    percent-hot = "0.33"
    # The percentage for the WARM queue
    percent-warm = "0.33"
  }

  tiny-lfu {
    # CountMinSketch: count-min-4 (4-bit), count-min-64 (64-bit)
    # Table: random-table, tiny-table, perfect-table
    sketch = "count-min-4"

    # Whether CountMin sketch increments are conservative, updating only the minimum counters
    count-min.conservative = false

    count-min-64 {
      eps = "0.0001"
      confidence = "0.99"
    }

    count-min-4 {
      # periodic: Resets by periodically halving all counters
      # incremental: Resets by halving counters in an incremental sweep
      reset = "periodic"

      # The incremental reset interval (the number of additions before halving counters)
      increment = 16
    }
  }
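
  # For example (illustrative), to simulate the 64-bit CountMin sketch with
  # conservative increments, an override could set:
  #   tiny-lfu {
  #     sketch = "count-min-64"
  #     count-min.conservative = true
  #   }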

  window-tiny-lfu {
    # The percentage for the MAIN space (PROBATION + PROTECTED)
    percent-main = ["0.99"]
    # The percentage for the PROTECTED MAIN queue
    percent-main-protected = "0.80"
    # The percentage of the hottest entries where the PROTECTED move is skipped
    percent-fast-path = "0.0" # "0.05" is reasonable
  }

  simple-window-tiny-lfu {
    # The percentage for the MAIN queue
    percent-main = ["0.99"]
    # The percentage of the hottest entries where the MAIN move is skipped
    percent-fast-path = "0.0" # "0.05" is reasonable
  }

  random-window-tiny-lfu {
    # The percentage for the MAIN space
    percent-main = ["0.99", "0.95", "0.90", "0.85", "0.80"]
  }

  fully-segmented-window-tiny-lfu {
    # The percentage for the MAIN space (PROBATION + PROTECTED)
    percent-main = ["0.99"]
    # The percentage for the PROTECTED MAIN queue
    percent-main-protected = "0.80"
    # The percentage for the PROTECTED EDEN queue
    percent-eden-protected = "0.80"
    # The percentage of the hottest entries where the PROTECTED MAIN move is skipped
    percent-fast-path = "0.0" # "0.05" is reasonable
  }

  s4-window-tiny-lfu {
    # The percentage for the MAIN queue
    percent-main = ["0.99"]
    # The number of segments in the MAIN space
    levels = 4
  }

  lirs {
    # The percentage for the HOT queue
    percent-hot = "0.99"
    # The multiple of the maximum size dedicated to non-resident entries
    non-resident-multiplier = "2.0"
    # The percentage of the hottest entries where the stack move is skipped
    percent-fast-path = "0.0" # "0.05" is reasonable
  }

  ehcache2 {
    # Policies: Lru, Lfu, Fifo, Clock
    policy = "lru"
  }

  infinispan {
    # Policies: Lru, Lirs
    policy = "lirs"
  }

  tcache {
    # Policies: Lru, Lfu
    policy = "lfu"
  }

  # files: reads from the trace file(s)
  # synthetic: reads from a synthetic generator
  source = "files"
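
  # For example (illustrative), to drive the simulation from the synthetic generator
  # instead of trace files, an override could set:
  #   source = "synthetic"
  #   synthetic.distribution = "zipfian"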

  files {
    # The paths to the trace files, or the file names if in the format's package
    paths = [ "multi1.trace.gz" ]

    # address: format of UCSD program address traces
    # arc: format from the authors of the ARC algorithm
    # lirs: format from the authors of the LIRS algorithm
    # wikipedia: format from the WikiBench request traces
    # umass-storage: format from the University of Massachusetts storage traces
    format = "lirs"
  }
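
  # For example (illustrative), to replay a trace in the ARC authors' format from an
  # absolute path, an override could set:
  #   files {
  #     paths = [ "/path/to/workload.arc" ]   # hypothetical path
  #     format = "arc"
  #   }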

  synthetic {
    # The number of events to generate
    events = 10000

    # counter, uniform, exponential, hotspot, zipfian, scrambled-zipfian, or skewed-zipfian-latest
    distribution = "scrambled-zipfian"

    # A sequence of unique integers starting from the given start value
    counter.start = 1

    # A sequence drawn uniformly at random from the specified range
    uniform {
      lower-bound = 1
      upper-bound = 1000
    }

    # A sequence based on an exponential distribution with a mean arrival rate of gamma
    exponential.mean = 1.0

    # A sequence resembling a hotspot distribution where x% of operations access y% of data items
    hotspot {
      # The lower bound of the distribution
      lower-bound = 1
      # The upper bound of the distribution
      upper-bound = 1000
      # The percentage of the interval that comprises the hot set
      hotset-fraction = 0.25
      # The percentage of operations that access the hot set
      hot-opn-fraction = 0.25
    }
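
    # With the defaults above, 25% of operations fall on the hot set, which spans
    # 25% of the 1..1000 interval (roughly 250 keys).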

    # A sequence where some items are more popular than others, according to a zipfian distribution
    zipfian {
      # The number of items
      items = 5000

      # A zipfian sequence that scatters the "popular" items across the item space. Use if you don't
      # want the head of the distribution (the popular items) clustered together.
      scrambled {}

      # A zipfian sequence with a popularity distribution of items, skewed to favor recent items
      # significantly more than older items
      skewed-zipfian-latest {}
    }
  }
}