##########################################
# Kamon Core Reference Configuration #
##########################################
kamon {
environment {
# Identifier for this service.
service = "kamon-application"
# Identifier for the host where this service is running. If set to `auto`, Kamon will use the hostname
# resolved for localhost.
host = "auto"
# Identifier for a particular instance of this service. If set to `auto` Kamon will use the pattern service@host.
instance = "auto"
# Arbitrary key-value pairs that further identify the environment where this service instance is running. Typically
# these tags will be used by the reporting modules as additional tags for all metrics or spans. Take a look at each
# reporter module's configuration to ensure these tags are supported and included in the reported data. Example:
#
# kamon.environment.tags {
# env = "staging"
# region = "us-east-1"
# }
tags {
}
}
# Modules that can be automatically discovered and started by Kamon. The configuration for each module has the
# following schema:
#
# kamon.modules {
# module-path {
# enabled = true
# name = "The Module Name"
# description = "A module description"
# factory = "com.example.ModuleFactory"
# }
# }
#
# All available modules in the classpath are started when calling Kamon.loadModules() and stopped when calling
# Kamon.stopModules(). When starting modules available on the classpath, Kamon will instantiate the configured
# "factory" (must be an implementation of kamon.module.ModuleFactory) using a parameter-less constructor and pass
# in the configuration instance found under the specified "configuration-path".
#
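# For example, a module discovered on the classpath could be disabled by overriding its `enabled` flag. The module
# path "some-module" below is hypothetical; use the path declared by the module you want to disable:
#
# kamon.modules {
# some-module {
# enabled = no
# }
# }
#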
modules {
}
# Pool size for the executor service that will run sampling on RangeSampler instruments. This scheduler is accessible
# through Kamon.scheduler().
scheduler-pool-size = 2
metric {
# Interval at which metric snapshots will be collected and sent to all metric reporters.
tick-interval = 60 seconds
# When optimistic tick alignment is enabled the metrics ticker will try to schedule the ticks to happen as close as
# possible to round tick-interval units. E.g. if the tick-interval is set to 60 seconds then Kamon will try to
# schedule the ticks at the beginning of each minute; if the tick-interval is set to 20 seconds then Kamon will try
# to schedule the ticks at 0, 20, and 40 seconds of each minute. The alignment is not meant to be perfect, just to
# improve the ability to correlate the timestamp reported in ticks with logs.
optimistic-tick-alignment = yes
# Thread pool size used by the metrics refresh scheduler. This pool is only used to periodically sample
# range-sampler values.
refresh-scheduler-pool-size = 2
factory {
# Default settings for metrics created by Kamon. The actual settings to be used when creating a metric are
# determined by merging the default settings, programmatically provided settings and custom-settings using the
# following order (top wins):
#
# - any setting in the `custom-settings` section.
# - programmatically provided settings when creating a metric.
# - `default-settings` below.
#
default-settings {
counter {
auto-update-interval = 10 seconds
}
gauge {
auto-update-interval = 10 seconds
}
histogram {
auto-update-interval = 10 seconds
lowest-discernible-value = 1
highest-trackable-value = 3600000000000
significant-value-digits = 2
}
timer {
auto-update-interval = 10 seconds
lowest-discernible-value = 1
highest-trackable-value = 3600000000000
significant-value-digits = 2
}
range-sampler {
auto-update-interval = 200 ms
lowest-discernible-value = 1
highest-trackable-value = 3600000000000
significant-value-digits = 2
}
}
# Custom settings for instruments of a given metric. The settings provided in this section override the default
# and manually provided settings when creating metrics. All settings in this section are optional and the default
# values from `kamon.metric.factory.default-settings` will be used for any missing setting.
#
# Example:
# If you wish to change the highest trackable value setting of the `span.processing-time` metric, you should
# include the following configuration in your application.conf file:
#
# kamon.metric.factory.custom-settings {
# "span.processing-time" {
# highest-trackable-value = 5000
# }
# }
#
custom-settings {
}
}
}
trace {
# Interval at which sampled finished spans will be flushed to SpanReporters.
tick-interval = 10 seconds
# Size of the internal queue where sampled spans will stay until they get flushed. If the queue becomes full then
# sampled finished spans will be dropped in order to avoid consuming excessive amounts of memory. Each configured
# reporter has a separate queue.
reporter-queue-size = 4096
# Decides how long the tracer should wait to report Spans after they are finished. This setting helps keep
# Spans in memory for a few seconds after they finish, in case users want to manually override the sampling
# decision for the trace later on. Keep in mind that:
# - Overriding the sampling decision is a local-only action; any Spans from other connected services will not be
# retained or affected in any way.
# - This will introduce additional memory consumption proportional to the reporting delay and the Spans volume.
#
# Setting this value to "0 seconds" disables the reporting delay.
span-reporting-delay = 0 seconds
# Decide whether a new, locally created Span should have the same Span Identifier as its remote parent (if any) or
# get a new local identifier. Certain tracing systems use the same Span Identifier to represent both sides (client
# and server) of an RPC call; if you are reporting data to such systems then this option should be enabled. This
# option only affects spans with the tag "span.kind=server".
#
# If you are using Zipkin, keep this option enabled. If you are using Jaeger, disable it.
join-remote-parents-with-same-span-id = no
# Configures what Scheme will be used for trace and span identifiers. The possible values are:
# single: Uses 8-byte identifiers for both traces and spans.
# double: Uses 16-byte identifiers for traces and 8-byte identifiers for spans.
# fqcn: You can use your own identifier scheme by providing its fully qualified class name in this setting. If
# an FQCN is provided, please make sure that the class has a parameter-less constructor and take
# into account that even though tracing might be working perfectly, you should confirm whether your custom
# scheme will work fine with the SpanReporters that you plan to use.
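# For example, a custom scheme could be plugged in by providing its FQCN (the class name below is hypothetical):
#
# identifier-scheme = "com.example.CustomIdentifierScheme"
#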
identifier-scheme = single
# Decides whether to include the stack trace of a Throwable as the "error.stacktrace" Span tag when a Span is marked
# as failed.
include-error-stacktrace = yes
# Configures a sampler that decides which Spans should be sent to Span reporters. The possible values are:
# - always: report all traces.
# - never: don't report any trace.
# - random: randomly decide using the probability defined in the random-sampler.probability setting.
# - adaptive: keeps dynamic samplers for each operation while trying to achieve a set throughput goal.
# - An FQCN of a kamon.trace.Sampler implementation to be used instead. The implementation must have a default
# constructor that will be used by Kamon when creating the instance.
#
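# For example, a custom sampler implementation could be configured with its FQCN instead of one of the shortcuts
# above (the class name below is hypothetical):
#
# sampler = "com.example.CustomSampler"
#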
sampler = "adaptive"
# The random sampler uses the "probability" setting and a random number to take a decision: if the random number is
# in the upper (probability * 100) percent of the number spectrum the trace will be sampled. E.g. a probability of
# 0.01 means that 1% of all traces should be reported.
random-sampler {
# Probability of a span being sampled. Must be a value between 0 and 1.
probability = 0.01
}
# An adaptive sampler tries to balance a global throughput goal across all operations in the current application,
# making the best possible effort to provide sampled traces of all operations.
#
adaptive-sampler {
# The target maximum number of affirmative sample decisions to be taken by the sampler. The sampler will make a best
# effort to balance sampling decisions across all operations so that no more than this number of affirmative
# decisions is produced.
throughput = 600
# Groups allow overriding the default balancing behavior for a particular subset of operations in this
# application. With groups, users can guarantee that certain operations will always be sampled, never sampled, or
# provide minimum and/or maximum sampled traces throughput goals.
#
# Groups have two properties: an operation name regex list which decides what operations get into the group and a
# set of rules for that group. The available rules are:
#
# sample: [always|never] Provides a definitive sampling decision for all operations in the group.
# When this rule is set, the sampler will always return this decision and any other
# configured rule will be ignored.
#
# minimum-throughput: [number] Defines the minimum number of sampled traces expected per minute for each
# operation in the group. Even though the sampler will do its best to provide the
# minimum number of sampled traces, the actual minimum will vary depending on application
# traffic and the global throughput goal.
#
# maximum-throughput: [number] Defines the maximum number of sampled traces expected per minute for each
# operation in the group, regardless of whether there is room left before meeting the global
# throughput goal.
#
# For example, if you wanted to ensure that health check operations are never sampled, you could include:
#
# health-checks {
# operations = ["GET \/status"]
#
# rules {
# sample = never
# }
# }
#
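# As another sketch, a group that asks for at least 10 and at most 60 sampled traces per minute for checkout
# operations (the group name and operation pattern are hypothetical):
#
# checkout {
# operations = ["POST \/checkout.*"]
#
# rules {
# minimum-throughput = 10
# maximum-throughput = 60
# }
# }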
#
groups {
}
}
# Automatically change the sampling decision for a trace based on either the number of errors in a trace or a
# fixed latency threshold. The local tail sampler will only work when the `span-reporting-delay` setting is set to a
# value greater than zero.
#
# Important things to keep in mind:
# - The sampling decision override happens only in the current process. This means that any requests sent to other
# services before the sampling decision override happened might be missing from the resulting trace.
# - Try to set `span-reporting-delay` to a value slightly higher than your application's timeout to ensure all the
# local spans related to the trace will still be kept in memory when the sampling override happens.
#
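# A minimal sketch of enabling local tail sampling together with a reporting delay (the values are illustrative):
#
# kamon.trace {
# span-reporting-delay = 10 seconds
#
# local-tail-sampler {
# enabled = yes
# error-count-threshold = 1
# latency-threshold = 2 seconds
# }
# }
#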
local-tail-sampler {
# Turns the local tail sampler logic on or off.
enabled = no
# Overrides the sampling decision to "Sample" when a trace contains at least this number of errors when the local
# Root Span finishes processing.
error-count-threshold = 1
# Overrides the sampling decision to "Sample" when the local Root Span took more than this time to complete.
latency-threshold = 1 second
}
# Settings that influence the tags applied to the "span.processing-time" metric for all finished spans with metric
# tracking enabled.
#
span-metric-tags {
# When this option is enabled the metrics collected for Spans will automatically add a tag named "upstream.name"
# to all Server and Consumer operation Spans with the name of the service that generated their parent Span, if
# available. Kamon propagates the "upstream.name" tag by default on all propagation channels.
upstream-service = yes
# When this option is enabled the metrics collected for Spans will automatically add a tag named "parentOperation"
# with the name of the operation on the parent Span, if any.
parent-operation = yes
}
# Configures hooks that can act on SpanBuilder and Span instances created by the Tracer. This allows users to, for
# example, track additional span-related metrics, modify operation names based on available Span information or put
# additional information on Spans.
#
hooks {
# List of FQCN of all "Tracer.PreStartHook" implementations to be used by the Tracer. All implementations must
# have a parameter-less constructor. Kamon ships with the "kamon.trace.Hooks$PreStart$FromContext" hook which can
# look for PreStart hooks on the current Context and apply them if available.
pre-start = [ "kamon.trace.Hooks$PreStart$FromContext" ]
# List of FQCN of all "Tracer.PreFinishHook" implementations to be used by the Tracer. All implementations must
# have a parameter-less constructor. Kamon ships with the "kamon.trace.Hooks$PreFinish$FromContext" hook which can
# look for PreFinish hooks on the current Context and apply them if available.
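# For example, to also apply PreFinish hooks found on the current Context, the bundled hook mentioned above could
# be added to this list:
#
# pre-finish = [ "kamon.trace.Hooks$PreFinish$FromContext" ]
#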
pre-finish = []
}
}
propagation {
http {
# Default HTTP propagation. Unless specified otherwise, all instrumentation will use the configuration in
# this section for HTTP context propagation.
#
default {
# Configures how context tags will be propagated over HTTP headers.
#
tags {
# Header name used to encode context tags.
header-name = "context-tags"
# Enables automatic inclusion of the "upstream.name" tag in outgoing requests.
include-upstream-name = yes
# Provides the means to filter which tags NOT to propagate as HTTP headers.
# E.g. having stored a tag in the context with the name 'mytag', one can exclude it from being propagated:
# filter = ["mytag"]
# or using Glob patterns:
# filter = ["my*"]
#
filter = []
# Provide explicit mappings between context tags and the HTTP headers that will carry them. When there is
# an explicit mapping for a tag, it will not be included in the default context header. For example, if
# you wanted to use an HTTP header called `X-Correlation-ID` for a context tag with key `correlationID`,
# you would need to include the following configuration:
#
# mappings {
# correlationID = "X-Correlation-ID"
# }
#
# The correlationID tag would always be read from and written to the `X-Correlation-ID` header. The context
# tag name is represented by the configuration key and the desired header name is represented by the
# configuration value.
#
mappings {
}
}
# Configure which entries should be read from incoming HTTP requests and written to outgoing HTTP requests.
#
entries {
# Specify mappings between Context keys and the Propagation.EntryReader[HeaderReader] implementation in charge
# of reading them from the incoming HTTP request into the Context.
incoming {
# The Propagation.EntryReader[HeaderReader] implementation used for Spans. Specify the FQCN of the
# implementation or one of the available shortcuts:
# - b3 for the B3 format (see https://github.com/openzipkin/b3-propagation)
# - b3-single for the Zipkin B3 single header format (see https://github.com/openzipkin/b3-propagation#single-header)
# - uber for the Uber/Jaeger format (see https://www.jaegertracing.io/docs/1.16/client-libraries/#propagation-format)
# - w3c for the W3C Trace Context format (see https://www.w3.org/TR/trace-context-1/) - requires identifier-scheme = double
#
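# For example, to read W3C Trace Context headers instead of B3 (remember that this also requires
# kamon.trace.identifier-scheme = double):
#
# span = "w3c"
#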
span = "b3"
}
# Specify mappings between Context keys and the Propagation.EntryWriter[HeaderWriter] implementation in charge
# of writing them to outgoing HTTP requests.
outgoing {
# The Propagation.EntryWriter[HeaderWriter] implementation used for Spans. Specify the FQCN of the
# implementation or one of the available shortcuts:
# - b3 for the B3 format (see https://github.com/openzipkin/b3-propagation)
# - b3-single for the Zipkin B3 single header format (see https://github.com/openzipkin/b3-propagation#single-header)
# - uber for the Uber/Jaeger format (see https://www.jaegertracing.io/docs/1.16/client-libraries/#propagation-format)
# - w3c for the W3C Trace Context format (see https://www.w3.org/TR/trace-context-1/) - requires identifier-scheme = double
#
span = "b3"
}
}
}
}
binary {
# Default binary propagation. Unless specified otherwise, all instrumentation will use the configuration in
# this section for binary context propagation.
#
default {
# Maximum outgoing Context size for binary transports. Contexts that surpass this limit will not be written to
# the outgoing medium.
max-outgoing-size = 2048
# Configures how context tags will be propagated over the binary transport.
#
tags {
# Enables automatic inclusion of the "upstream.name" tag in outgoing messages.
include-upstream-name = yes
}
# Configure which entries should be read from incoming messages and written to outgoing messages.
#
entries {
# Specify mappings between Context keys and the Propagation.EntryReader[ByteStreamReader] implementation in
# charge of reading them from the incoming messages into the Context.
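# Additional Context keys could be mapped here as well by pairing the key name with the FQCN of its reader
# implementation; both the key name and the class in this sketch are hypothetical:
#
# my-key = "com.example.MyEntryReader"
#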
incoming {
span = "kamon.trace.SpanPropagation$Colfer"
}
# Specify mappings between Context keys and the Propagation.EntryWriter[ByteStreamWriter] implementation in
# charge of writing them to outgoing messages.
outgoing {
span = "kamon.trace.SpanPropagation$Colfer"
}
}
}
}
}
}