# number of worker threads shared across the application. Increasing this number will increase parallelism in the application at the expense of CPU resources
#shared.worker.count=2
# comma-delimited list of CPU ids, one per thread specified in "shared.worker.count". By default, threads have no CPU affinity
#shared.worker.affinity=
# toggle whether the worker should stop on error
#shared.worker.haltOnError=false
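## Example (illustrative values, not defaults): a host with spare cores could run
## four shared workers pinned to CPUs 1-4. Uncomment and size to your hardware.
#shared.worker.count=4
#shared.worker.affinity=1,2,3,4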
################ HTTP settings ##################
# enable HTTP server
#http.enabled=true
# IP address and port of HTTP server
#http.bind.to=0.0.0.0:9000
# initial size of the connection pool
#http.connection.pool.initial.capacity=16
# initial size of the string pool shared by HttpHeaderParser and HttpMultipartContentParser
#http.connection.string.pool.capacity=128
# HeaderParser buffer size in bytes
#http.multipart.header.buffer.size=512
# how long the code accumulates incoming data chunks for column and delimiter analysis
#http.multipart.idle.spin.count=10000
# size of receive buffer
#http.receive.buffer.size=1m
#http.request.header.buffer.size=64k
#http.worker.count=0
#http.worker.affinity=
#http.worker.haltOnError=false
# size of send data buffer
#http.send.buffer.size=2m
# name of index file
#http.static.index.file.name=index.html
# sets the clock to always return zero
#http.frozen.clock=false
#http.allow.deflate.before.send=false
## When using an SSH tunnel you might want to configure the
## QuestDB HTTP server to switch to HTTP/1.0
## Set HTTP protocol version to HTTP/1.0
#http.version=HTTP/1.1
## Set server keep-alive to 'false'. This will make the server disconnect the client
## after completion of each request
#http.server.keep.alive=true
## When in HTTP/1.0 mode, keep-alive values must be 0
#http.keep-alive.timeout=5
#http.keep-alive.max=10000
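## Example (illustrative, not defaults): behind an SSH tunnel you might switch to
## HTTP/1.0, disable keep-alive and zero the keep-alive values, e.g.
#http.version=HTTP/1.0
#http.server.keep.alive=false
#http.keep-alive.timeout=0
#http.keep-alive.max=0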
#http.static.public.directory=public
#http.net.active.connection.limit=256
#http.net.event.capacity=1024
#http.net.io.queue.capacity=1024
#http.net.idle.connection.timeout=300000
#http.net.interest.queue.capacity=1024
#http.net.listen.backlog=256
#http.net.snd.buf.size=2m
#http.net.rcv.buf.size=2m
#http.text.date.adapter.pool.capacity=16
#http.text.json.cache.limit=16384
#http.text.json.cache.size=8192
#http.text.max.required.delimiter.stddev=0.1222d
#http.text.max.required.line.length.stddev=0.8
#http.text.metadata.string.pool.capacity=128
#http.text.roll.buffer.limit=8216576
#http.text.roll.buffer.size=1024
#http.text.analysis.max.lines=1000
#http.text.lexer.string.pool.capacity=64
#http.text.timestamp.adapter.pool.capacity=64
#http.text.utf8.sink.size=4096
#http.json.query.connection.check.frequency=1000000
#http.json.query.float.scale=4
#http.json.query.double.scale=12
#http.security.readonly=false
#http.security.max.response.rows=Long.MAX_VALUE
# Switch to turn on terminating SQL processing if the HTTP connection is closed.
# The mechanism affects performance, so the connection is only checked after http.security.interruptor.iterations.per.check calls are made to the check method.
# The mechanism also reads \r\n from the input stream and discards it, since some HTTP clients send this as a keep-alive in between requests; http.security.interruptor.buffer.size denotes the size of the buffer for this.
#http.security.interrupt.on.closed.connection=true
#http.security.interruptor.iterations.per.check=2000000
#http.security.interruptor.buffer.size=32
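## Example (illustrative values, not defaults): to detect closed connections sooner,
## at some performance cost, you could lower the iteration count between checks, e.g.
#http.security.interrupt.on.closed.connection=true
#http.security.interruptor.iterations.per.check=1000000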
## HTTP MIN settings
##
## Use this port to health-check the QuestDB instance when you don't want these health-check requests logged. This is a sort of /dev/null for monitoring
# http.min.enabled=true
# http.min.bind.to=0.0.0.0:9013
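## Example (illustrative): bind the health-check port to localhost only so that just
## a local monitoring agent can reach it, e.g.
# http.min.enabled=true
# http.min.bind.to=127.0.0.1:9013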
################ Cairo settings ##################
# directory for storing db tables and metadata. this directory is inside the server root directory provided at startup
#cairo.root=db
# how changes to a table are flushed to disk upon commit - default: nosync. Choices: nosync, async (flush call schedules update, returns immediately), sync (waits for flush to complete)
#cairo.commit.mode=nosync
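## Example (illustrative, not a default): if durability matters more than commit
## latency, you could switch the commit mode to sync, e.g.
#cairo.commit.mode=sync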
# number of times table creation or insertion will be attempted
#cairo.create.as.select.retry.count=5
# type of map used. Options: 1. fast (speed at the expense of storage; this is the default) 2. compact
#cairo.default.map.type=fast
# when true, symbol values will be cached on Java heap
#cairo.default.symbol.cache.flag=false
# when column type is SYMBOL this parameter specifies the approximate capacity of the symbol map.
# It should be equal to the number of unique symbol values stored in the table; getting this
# value badly wrong will cause performance degradation. Must be a power of 2
#cairo.default.symbol.capacity=256
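## Example (illustrative value): for a table expected to hold roughly 10,000 distinct
## symbol values, round the capacity up to the next power of 2, e.g.
#cairo.default.symbol.capacity=16384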
# number of attempts to open files
#cairo.file.operation.retry.count=30
# how often the writer maintenance job gets run, in milliseconds
#cairo.idle.check.interval=300000
# defines the frequency with which the reader pool checks for inactive readers, in milliseconds
#cairo.inactive.reader.ttl=-10000
# defines the frequency with which the writer pool checks for inactive writers, in milliseconds
#cairo.inactive.writer.ttl=-10000
# approximation of number of rows for single index key, must be power of 2
#cairo.index.value.block.size=256
# number of attempts to open swap file
#cairo.max.swap.file.count=30
# file permission for new directories
#cairo.mkdir.mode=509
# minimum number of rows before allowing use of parallel indexation
#cairo.parallel.index.threshold=100000
# number of attempts to get TableReader
#cairo.reader.pool.max.segments=5
# timeout when attempting to get BitmapIndexReaders. In microseconds
#cairo.spin.lock.timeout=1000000
# queries are cached. This property sets the number of rows for the query cache
#cairo.cache.rows=16
# sets the number of blocks for the query cache
#cairo.cache.blocks=4
# sets size of the CharacterStore
#cairo.character.store.capacity=1024
# Sets size of the CharacterSequence pool
#cairo.character.store.sequence.pool.capacity=64
# sets size of the Column pool in the SqlCompiler
#cairo.column.pool.capacity=4096
# load factor for CompactMaps
#cairo.compact.map.load.factor=0.7
# size of the ExpressionNode pool in SqlCompiler
#cairo.expression.pool.capacity=8192
# load factor for all FastMaps
#cairo.fast.map.load.factor=0.5
# size of the JoinContext pool in SqlCompiler
#cairo.sql.join.context.pool.capacity=64
# size of FloatingSequence pool in GenericLexer
#cairo.lexer.pool.capacity=2048
# sets the Key capacity in FastMap and CompactMap
#cairo.sql.map.key.capacity=2048 * 1024
# number of map resizes in FastMap and CompactMap before a resource limit exception is thrown; each resize doubles the previous size
#cairo.sql.map.max.resizes=2^31
# memory page size for FastMap and CompactMap
#cairo.sql.map.page.size=4m
# memory max pages for CompactMap
#cairo.sql.map.max.pages=2^31
# sets the size of the QueryModel pool in the SqlCompiler
#cairo.model.pool.capacity=1024
# sets the memory page size for storing keys in LongTreeChain
#cairo.sql.sort.key.page.size=4m
# max number of pages for storing keys in LongTreeChain before a resource limit exception is thrown
#cairo.sql.sort.key.max.pages=2^31
# sets the memory page size and max pages for storing values in LongTreeChain
#cairo.sql.sort.light.value.page.size=1048576
#cairo.sql.sort.light.value.max.pages=2^31
# sets the memory page size and max pages of the slave chain in full hash joins
#cairo.sql.hash.join.value.page.size=16777216
#cairo.sql.hash.join.value.max.pages=2^31
# sets the number of rows for LATEST BY
#cairo.sql.latest.by.row.count=1000
# sets the memory page size and max pages of the slave chain in light hash joins
#cairo.sql.hash.join.light.value.page.size=1048576
#cairo.sql.hash.join.light.value.max.pages=2^31
# sets memory page size and max pages of file storing values in SortedRecordCursorFactory
#cairo.sql.sort.value.page.size=16777216
#cairo.sql.sort.value.max.pages=2^31
# latch await timeout in nanoseconds for stealing indexing work from other threads
#cairo.work.steal.timeout.nanos=10000
# whether parallel indexation is allowed. Works in conjunction with cairo.parallel.index.threshold
#cairo.parallel.indexing.enabled=true
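## Example (illustrative values, not defaults): to allow parallel indexation on
## smaller tables you could keep it enabled and lower the row threshold, e.g.
#cairo.parallel.indexing.enabled=true
#cairo.parallel.index.threshold=10000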
# memory page size for JoinMetadata file
#cairo.sql.join.metadata.page.size=16384
# number of map resizes in JoinMetadata before a resource limit exception is thrown; each resize doubles the previous size
#cairo.sql.join.metadata.max.resizes=2^31
# size of AnalyticColumn pool in SqlParser
#cairo.sql.analytic.column.pool.capacity=64
# size of CreateTableModel pool in SqlParser
#cairo.sql.create.table.model.pool.capacity=16
# size of ColumnCastModel pool in SqlParser
#cairo.sql.column.cast.model.pool.capacity=16
# size of RenameTableModel pool in SqlParser
#cairo.sql.rename.table.model.pool.capacity=16
# size of WithClauseModel pool in SqlParser
#cairo.sql.with.clause.model.pool.capacity=128
# size of InsertModel pool in SqlParser
#cairo.sql.insert.model.pool.capacity=64
# size of CopyModel pool in SqlParser
#cairo.sql.copy.model.pool.capacity=32
# size of buffer used when copying tables
#cairo.sql.copy.buffer.size=2m
#cairo.sql.double.cast.scale=12
#cairo.sql.float.cast.scale=4
# name of file with user's set of date and timestamp formats
#cairo.sql.copy.formats.file=/text_loader.json
# input root directory for backups
#cairo.sql.copy.root=null
# output root directory for backups
#cairo.sql.backup.root=null
# date format for backup directory
#cairo.sql.backup.dir.datetime.format=yyyy-MM-dd
# name of temp directory used during backup
#cairo.sql.backup.dir.tmp.name=tmp
# permission used when creating backup directories
#cairo.sql.backup.mkdir.mode=509
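## Example (illustrative path, not a default): point the backup output root at a
## dedicated volume before running backups, e.g.
#cairo.sql.backup.root=/var/lib/questdb/backups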
#cairo.date.locale=en
#cairo.timestamp.locale=en
# Maximum number of uncommitted rows for ILP over TCP
#cairo.o3.max.uncommitted.rows=1000
################ LINE UDP settings ##################
#line.udp.bind.to=0.0.0.0:9009
#line.udp.join=232.1.2.3
#line.udp.commit.rate=1000000
#line.udp.msg.buffer.size=2048
#line.udp.msg.count=10000
#line.udp.receive.buffer.size=8m
#line.udp.enabled=true
#line.udp.own.thread.affinity=-1
#line.udp.own.thread=false
#line.udp.unicast=false
#line.udp.commit.mode
#line.udp.timestamp=n
######################### LINE TCP settings ###############################
#line.tcp.enabled=true
#line.tcp.net.active.connection.limit=10
#line.tcp.net.bind.to=0.0.0.0:9009
#line.tcp.net.event.capacity=1024
#line.tcp.net.io.queue.capacity=1024
#line.tcp.net.idle.timeout=0
#line.tcp.net.interest.queue.capacity=1024
#line.tcp.net.listen.backlog=50000
#line.tcp.net.recv.buf.size=-1
#line.tcp.connection.pool.capacity=64
#line.tcp.timestamp=n
#line.tcp.default.partition.by=DAY
# TCP message buffer size
#line.tcp.msg.buffer.size=2048
# Max measurement size
#line.tcp.max.measurement.size=2048
# Size of the queue between the IO jobs and the writer jobs, each queue entry represents a measurement
#line.tcp.writer.queue.capacity=128
# IO and writer job worker pool settings, 0 indicates the shared pool should be used
#line.tcp.writer.worker.count=0
#line.tcp.writer.worker.affinity=
#line.tcp.writer.worker.yield.threshold=10
#line.tcp.writer.worker.sleep.threshold=10000
#line.tcp.writer.halt.on.error=false
#line.tcp.io.worker.count=0
#line.tcp.io.worker.affinity=
#line.tcp.io.worker.yield.threshold=10
#line.tcp.io.worker.sleep.threshold=10000
#line.tcp.io.halt.on.error=false
#line.tcp.io.aggressive.recv=false
# Number of updates (per table) between attempts to rebalance the load between the writer workers
#line.tcp.n.updates.per.load.balance=10000
# Maximum load ratio (max loaded worker/min loaded worker) before QuestDB will attempt to rebalance the load between the writer workers
#line.tcp.max.load.ratio=1.9
# Maximum amount of time between maintenance jobs; these will commit uncommitted data
#line.tcp.maintenance.job.hysteresis.in.ms=1000
# Minimum amount of idle time before a table writer is released
#line.tcp.min.idle.ms.before.writer.release=30000
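## Example (illustrative values, not defaults): on a busy ingestion host you could
## dedicate workers to the ILP writer and IO jobs instead of using the shared pool
## (a count of 0 means the shared pool is used), e.g.
#line.tcp.writer.worker.count=2
#line.tcp.io.worker.count=2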
################ PG Wire settings ##################
#pg.enabled=true
#pg.net.active.connection.limit=10
#pg.net.bind.to=0.0.0.0:8812
#pg.net.event.capacity=1024
#pg.net.io.queue.capacity=1024
#pg.net.idle.timeout=300000
#pg.net.interest.queue.capacity=1024
#pg.net.listen.backlog=50000
#pg.net.recv.buf.size=-1
#pg.net.send.buf.size=-1
#pg.character.store.capacity=4096
#pg.character.store.pool.capacity=64
#pg.connection.pool.capacity=64
#pg.password=quest
#pg.user=admin
#pg.factory.cache.column.count=16
#pg.factory.cache.row.count=16
#pg.idle.recv.count.before.giving.up=10000
#pg.idle.send.count.before.giving.up=10000
#pg.max.blob.size.on.query=512k
#pg.recv.buffer.size=1M
#pg.send.buffer.size=1M
#pg.date.locale=en
#pg.timestamp.locale=en
#pg.worker.count=2
#pg.worker.affinity=-1,-1
#pg.halt.on.error=false
#pg.daemon.pool=true
################ Telemetry settings ##################
#telemetry.enabled=true
#telemetry.queue.capacity=512