# kairosdb.properties (from the KairosDB 1.2.3 source)
# KairosDB is a fast, distributed, scalable time series abstraction on top of Cassandra.
kairosdb.telnetserver.port=4242
kairosdb.telnetserver.address=0.0.0.0
kairosdb.telnetserver.max_command_size=1024
# Properties that start with kairosdb.service are services that are started
# when kairos starts up. You can disable services in your custom
# kairosdb.properties file by setting the value to an empty string, e.g.
# kairosdb.service.telnet=
kairosdb.service.telnet=org.kairosdb.core.telnet.TelnetServerModule
kairosdb.service.http=org.kairosdb.core.http.WebServletModule
kairosdb.service.reporter=org.kairosdb.core.reporting.MetricReportingModule
#===============================================================================
#Each factory must be bound in a Guice module. The definitions here determine which
#protocol data type each factory handles.
#Default data point implementation for long - class must implement LongDataPointFactory
kairosdb.datapoints.factory.long=org.kairosdb.core.datapoints.LongDataPointFactoryImpl
#Default data point implementation for double - class must implement DoubleDataPointFactory
kairosdb.datapoints.factory.double=org.kairosdb.core.datapoints.DoubleDataPointFactoryImpl
kairosdb.datapoints.factory.string=org.kairosdb.core.datapoints.StringDataPointFactory
#===============================================================================
# Uses Quartz Cron syntax - default is to run every minute
kairosdb.reporter.schedule=0 */1 * * * ?
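# For example, the following (commented-out) schedule would report every 5 minutes
# instead of every minute (Quartz fields: seconds minutes hours day-of-month month day-of-week):
#kairosdb.reporter.schedule=0 */5 * * * ?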
# TTL to apply to all kairos reported metrics
kairosdb.reporter.ttl=0
#===============================================================================
# Set to 0 to turn off HTTP port
kairosdb.jetty.port=8080
kairosdb.jetty.address=0.0.0.0
kairosdb.jetty.static_web_root=webroot
# To enable SSL, uncomment the following lines and specify the port, the path to the keystore, and its password
#kairosdb.jetty.ssl.port=443
#kairosdb.jetty.ssl.protocols=TLSv1, TLSv1.1, TLSv1.2
#kairosdb.jetty.ssl.cipherSuites=TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384, TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_256_CBC_SHA
#kairosdb.jetty.ssl.keystore.path=
#kairosdb.jetty.ssl.keystore.password=
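# A hypothetical completed example (the keystore path and password below are
# placeholders, adjust them to your environment):
#kairosdb.jetty.ssl.keystore.path=/etc/kairosdb/keystore.jks
#kairosdb.jetty.ssl.keystore.password=changeit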
#To enable HTTP basic authentication, uncomment the following lines and specify
#the user name and password for authentication.
#kairosdb.jetty.basic_auth.user=
#kairosdb.jetty.basic_auth.password=
# To enable thread pooling uncomment the following lines and specify the limits
#kairosdb.jetty.threads.queue_size=6000
#kairosdb.jetty.threads.min=1000
#kairosdb.jetty.threads.max=2500
#kairosdb.jetty.threads.keep_alive_ms=10000
# To set the traffic type allowed for the server, change the kairosdb.server.type entry to INGEST, QUERY, or DELETE,
# or a comma delimited list of two of them to enable different sets of API methods.
# The default setting is ALL, which will enable all API methods.
#kairosdb.server.type=ALL
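# For example, a node that should accept writes and queries but not deletes could
# use the following (illustrative value):
#kairosdb.server.type=INGEST,QUERY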
#===============================================================================
kairosdb.service.datastore=org.kairosdb.datastore.h2.H2Module
kairosdb.datastore.concurrentQueryThreads=5
#kairosdb.service.datastore=org.kairosdb.datastore.cassandra.CassandraModule
#kairosdb.service.datastore=org.kairosdb.datastore.remote.RemoteModule
#===============================================================================
#H2 properties
kairosdb.datastore.h2.database_path=target/h2db
#===============================================================================
#Cassandra properties
#host list is in the form: 1.1.1.1:9042,1.1.1.2
#if the port is omitted it defaults to 9042
kairosdb.datastore.cassandra.cql_host_list=localhost
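# An illustrative multi-host list (hostnames are placeholders), mixing an explicit
# port with the default one:
#kairosdb.datastore.cassandra.cql_host_list=cassandra1.example.com:9042,cassandra2.example.com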
kairosdb.datastore.cassandra.keyspace=kairosdb
#Sets the replication for the keyspace. This is only used the first time Kairos
#starts up and needs to create the schema in Cassandra. Later changes
#to this property have no effect.
kairosdb.datastore.cassandra.replication={'class': 'SimpleStrategy','replication_factor' : 1}
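# For a multi-datacenter production cluster you would typically use
# NetworkTopologyStrategy instead; a sketch assuming a datacenter named 'dc1'
# with a replication factor of 3:
#kairosdb.datastore.cassandra.replication={'class': 'NetworkTopologyStrategy','dc1': 3}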
#For a single metric query this dictates the number of simultaneous CQL queries
#to run (i.e. one for each partition key of data). The larger the cluster, the higher
#you may want this number to be.
kairosdb.datastore.cassandra.simultaneous_cql_queries=20
# query_reader_threads is the number of threads to use to read results from
# each cql query. You may want to change this number depending on your environment
kairosdb.datastore.cassandra.query_reader_threads=6
# When set, the query_limit will prevent any query reading more than the specified
# number of data points. When the limit is reached an exception is thrown and an
# error is returned to the client. Set this value to 0 to disable (default)
#kairosdb.datastore.cassandra.query_limit=10000000
#Size of the row key cache. This can be monitored by querying
#kairosdb.datastore.write_size and filtering on the tag buffer = row_key_index
#Ideally the data written to the row_key_index should stabilize to zero except
#when data rolls to a new row
kairosdb.datastore.cassandra.row_key_cache_size=50000
kairosdb.datastore.cassandra.string_cache_size=50000
#Control the required consistency for cassandra operations.
#Available settings are cassandra version dependent:
#http://www.datastax.com/documentation/cassandra/2.0/webhelp/index.html#cassandra/dml/dml_config_consistency_c.html
kairosdb.datastore.cassandra.read_consistency_level=ONE
kairosdb.datastore.cassandra.write_consistency_level=QUORUM
# Set this if this kairosdb node connects to cassandra nodes in multiple datacenters.
# Not setting this will select cassandra hosts using the RoundRobinPolicy, while setting this will use DCAwareRoundRobinPolicy.
#kairosdb.datastore.cassandra.local_datacenter=
kairosdb.datastore.cassandra.connections_per_host.local.core=5
kairosdb.datastore.cassandra.connections_per_host.local.max=100
kairosdb.datastore.cassandra.connections_per_host.remote.core=1
kairosdb.datastore.cassandra.connections_per_host.remote.max=10
kairosdb.datastore.cassandra.max_requests_per_connection.local=128
kairosdb.datastore.cassandra.max_requests_per_connection.remote=128
#This is the number of retries the Cassandra client RetryPolicy will use before
#giving up on a CQL query. A good rule is to set it to the number of replicas you have.
kairosdb.datastore.cassandra.request_retry_count=1
kairosdb.datastore.cassandra.max_queue_size=500
#for cassandra authentication use the following
#kairosdb.datastore.cassandra.auth.[prop name]=[prop value]
#example:
#kairosdb.datastore.cassandra.auth.user_name=admin
#kairosdb.datastore.cassandra.auth.password=eat_me
# Set this property to true to enable SSL connections to your C* cluster.
# Follow the instructions found here: http://docs.datastax.com/en/developer/java-driver/3.1/manual/ssl/
# to create a keystore and pass the values into Kairos using the -D switches
kairosdb.datastore.cassandra.use_ssl=false
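# A sketch of the -D switches mentioned above, assuming a JSSE truststore created
# per the DataStax instructions (path and password are placeholders):
# -Djavax.net.ssl.trustStore=/etc/kairosdb/cassandra_truststore.jks
# -Djavax.net.ssl.trustStorePassword=changeit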
#the time to live in seconds for datapoints. After this period the data will be
#deleted automatically. If not set the data will live forever.
#TTLs are added to columns as they're inserted so setting this will not affect
#existing data, only new data.
#kairosdb.datastore.cassandra.datapoint_ttl=31536000
# Set this property to true to align each data point's TTL with its timestamp.
# Example: datapoint_ttl is set to 30 days and a data point is ingested with a timestamp of '25 days ago'.
# - Without this setting, the data point will be stored for 30 days from now, so it can be
#   queried for 30 + 25 days, which may not be the intended behaviour with a 30-day TTL.
# - Setting this property to true will only store this data point for 5 days (30 - 25 days).
# default: false
# Additional note: consider setting force_default_datapoint_ttl as well for full control
kairosdb.datastore.cassandra.align_datapoint_ttl_with_timestamp=false
# Set this property to true to force the default datapoint_ttl for all ingested data points,
# effectively ignoring any TTL information they provide.
# This gives you full control over how long data points are stored in Kairos.
# default: false
# Additional note: consider setting align_datapoint_ttl_with_timestamp as well for full control
kairosdb.datastore.cassandra.force_default_datapoint_ttl=false
# Tells kairos to try and create the keyspace and tables on startup.
kairosdb.datastore.cassandra.create_schema=true
# Milliseconds for java driver to wait for a connection from a C* node
kairosdb.datastore.cassandra.connection_timeout=5000
# Milliseconds for java driver to wait for a response from a C* before giving up
kairosdb.datastore.cassandra.read_timeout=12000
#===============================================================================
# Remote datastore properties
# Load the RemoteListener module instead of the RemoteDatastore if you want to
# fork the flow of data. This module allows you to continue writing to your
# configured datastore as well as send data on to a remote Kairos cluster.
# A sample use case is to run clusters in parallel before migrating to a larger cluster.
# Cannot be used in conjunction with the RemoteModule.
#kairosdb.service.remote=org.kairosdb.datastore.remote.ListenerModule
# Location to store data locally before it is sent off
kairosdb.datastore.remote.data_dir=.
kairosdb.datastore.remote.remote_url=http://10.92.1.41:8080
# quartz cron schedule for sending data (currently set to 30 min)
kairosdb.datastore.remote.schedule=0 */30 * * * ?
# Delay the sending of data by a random number of seconds.
# This prevents all remote Kairos nodes from sending data at the same time.
# For example, a value of 600 means the data will be sent every half hour plus a random
# delay of up to 10 minutes.
kairosdb.datastore.remote.random_delay=0
# Optional prefix filter for remote module. If present, only metrics that start with the
# values in this comma-separated list are forwarded on.
#kairosdb.datastore.remote.prefix_filter=
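# An illustrative filter (prefixes are placeholders) that would forward only metrics
# whose names start with "app." or "web.":
#kairosdb.datastore.remote.prefix_filter=app.,web.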
#===============================================================================
#Uncomment this line to require OAuth for connections to the HTTP server
#kairosdb.service.oauth=org.kairosdb.core.oauth.OAuthModule
#OAuth consumer keys and secrets in the form
#kairosdb.oauth.consumer.[consumer key]=[consumer secret]
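# A hypothetical entry following the form above (key and secret are placeholders):
#kairosdb.oauth.consumer.example_key=example_secret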
#===============================================================================
# Determines if cache files are deleted after being used or not.
# In some cases the cache file can be used again to speed up subsequent queries
# that query the same metric but aggregate the results differently.
kairosdb.query_cache.keep_cache_files=false
# Cache file cleaning schedule. Uses Quartz Cron syntax - this only matters if
# keep_cache_files is set to true
kairosdb.query_cache.cache_file_cleaner_schedule=0 0 12 ? * SUN *
#By default the query cache is located in kairos_cache under the system temp folder as
#defined by java.io.tmpdir system property. To override set the following value
#kairosdb.query_cache.cache_dir=
#===============================================================================
# Log long running queries. Set this to true to record long running queries
# into Kairos as the following metrics:
# kairosdb.log.query.remote_address - String data point that is the remote address
# of the system making the query
# kairosdb.log.query.json - String data point that is the query sent to Kairos
kairosdb.log.queries.enable=false
# Time to live to apply to the above metrics. This helps limit the amount of space
# used for storing the query information.
kairosdb.log.queries.ttl=86400
# Time in seconds. If the query request time is longer than this value the query
# will be written to the above metrics
kairosdb.log.queries.greater_than=60
# When set to true the query stats are aggregated into min, max, avg, sum, and count.
# Setting this to true will also disable the above log feature.
# Set this to true on Kairos nodes that receive large numbers of queries to avoid
# inserting data with each query.
kairosdb.queries.aggregate_stats=false
#===============================================================================
# Health Checks
kairosdb.service.health=org.kairosdb.core.health.HealthCheckModule
#Response code to return from a call to /api/v1/health/check
#Some load balancers want 200 instead of 204
kairosdb.health.healthyResponseCode=204
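# For example, to return 200 for load balancers that expect it:
#kairosdb.health.healthyResponseCode=200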
#===============================================================================
#Ingest queue processor
#kairosdb.queue_processor.class=org.kairosdb.core.queue.MemoryQueueProcessor
kairosdb.queue_processor.class=org.kairosdb.core.queue.FileQueueProcessor
# The number of data points to send to Cassandra in a single batch
# For the best performance you will want to set this to 10000 but first
# you will need to change the following values in your cassandra.yaml
# batch_size_warn_threshold_in_kb: 50
# batch_size_fail_threshold_in_kb: 70
# You may need to adjust the above numbers to fit your insert patterns.
# The CQL batch has a hard limit of 65535 items in a batch; make sure to stay
# under this, as a single data point can generate more than one insert into Cassandra.
# You will want to multiply this number by the number of hosts in the Cassandra
# cluster. A batch is pulled from the queue and then divided up depending on which
# host the data is destined for.
kairosdb.queue_processor.batch_size=200
# If the queue doesn't have at least this many items to process the process thread
# will pause for {min_batch_wait} milliseconds to wait for more before grabbing data from the queue.
# This is an attempt to prevent chatty inserts which can cause extra load on
# Cassandra
kairosdb.queue_processor.min_batch_size=100
# If the number of items in the process queue is less than {min_batch_size} the
# queue processor thread will wait this many milliseconds before flushing the data
# to C*. This is to prevent single chatty inserts. This only has an effect when
# data is trickling into Kairos.
kairosdb.queue_processor.min_batch_wait=500
# The size (number of data points) of the memory queue
# In the case of FileQueueProcessor:
# Ingest data is written to the memory queue as well as to disk. If the system gets
# behind, the memory queue is overrun and data is read from disk until it can
# catch up.
# In the case of MemoryQueueProcessor it defines the size of the memory queue.
kairosdb.queue_processor.memory_queue_size=100000
# The number of seconds before checkpointing the file backed queue. In the case of
# a crash the file backed queue is read from the last checkpoint
# Only applies to the FileQueueProcessor
kairosdb.queue_processor.seconds_till_checkpoint=90
# Path to the file backed queue
# Only applies to the FileQueueProcessor
kairosdb.queue_processor.queue_path=queue
# Page size of the file backed queue in bytes (52428800 = 50 MiB)
# Only applies to the FileQueueProcessor
kairosdb.queue_processor.page_size=52428800
#Number of threads allowed to insert data to the backend
#CassandraDatastore is the only use of this executor
kairosdb.ingest_executor.thread_count=10
#===============================================================================
# Roll-ups
#kairosdb.service.rollups=org.kairosdb.rollup.RollUpModule
# How often the Roll-up Manager queries for new or updated roll-ups
#kairosdb.rollups.server_assignment.check_update_delay_millseconds=10000
#===============================================================================
#===============================================================================
# Host Manager Service
# How often the host service checks for active hosts
kairosdb.host_service_manager.check_delay_time_millseconds=60000
# How long before a host is considered inactive
kairosdb.host_service_manager.inactive_time_seconds=300
#===============================================================================
#===============================================================================
#Demo and stress modules
# The demo module will load one year's worth of data into Kairos. The code goes back
# one year from the present and inserts data every minute. The data makes a
# pretty little sine wave.
#kairosdb.service.demo=org.kairosdb.core.demo.DemoModule
kairosdb.demo.metric_name=demo_data
# number_of_rows = the number of host tags to add to the data. Setting number_of_rows
# to 100 means 100 hosts reporting data every minute, each host with a different tag.
kairosdb.demo.number_of_rows=100
kairosdb.demo.ttl=0
# This just inserts data as fast as it can for the duration specified. Good for
# stress testing your backend. I have found that a single Kairos node will only
# send about 500k/sec because of a limitation in the cassandra client.
#kairosdb.service.blast=org.kairosdb.core.blast.BlastModule
# The number_of_rows translates into a random number between 0 and number_of_rows
# that is added as a tag to each data point, in order to simulate an even distribution
# of data in the cluster.
kairosdb.blast.number_of_rows=1000
kairosdb.blast.duration_seconds=30
kairosdb.blast.metric_name=blast_load
kairosdb.blast.ttl=600