# QuestDB is a high-performance time series database
# number of worker threads shared across the application. Increasing this number increases parallelism at the expense of CPU resources
#shared.worker.count=2
# comma-delimited list of CPU IDs, one per thread specified in "shared.worker.count". By default, threads have no CPU affinity
#shared.worker.affinity=
# toggle whether worker should stop on error
#shared.worker.haltOnError=false
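# Example (a sketch; the 16-core host and the values below are illustrative assumptions): run four shared workers pinned to cores 1-4
#shared.worker.count=4
#shared.worker.affinity=1,2,3,4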
################ HTTP settings ##################
# enable HTTP server
#http.enabled=true
#IP address and port of HTTP server
#http.bind.to=0.0.0.0:9000
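#
# Example (sketch; the loopback address is an illustrative choice): expose the HTTP server only on localhost
#http.bind.to=127.0.0.1:9000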
# initial size of the connection pool
#http.connection.pool.initial.capacity=16
# initial size of the string pool shared by HttpHeaderParser and HttpMultipartContentParser
#http.connection.string.pool.capacity=128
# HeaderParser buffer size in bytes
#http.multipart.header.buffer.size=512
# how long the multipart parser accumulates incoming data chunks for column and delimiter analysis
#http.multipart.idle.spin.count=10000
# size of receive buffer
#http.receive.buffer.size=1m
#http.request.header.buffer.size=64k
#http.response.header.buffer.size=32k
#http.worker.count=0
#http.worker.affinity=
#http.worker.haltOnError=false
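#
# Example (sketch; the default of 0 reuses the shared worker pool): give HTTP two dedicated workers with no CPU affinity
#http.worker.count=2
#http.worker.affinity=-1,-1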
#size of send data buffer
#http.send.buffer.size=2m
# name of index file
#http.static.index.file.name=index.html
# sets the clock to always return zero
#http.frozen.clock=false
#http.allow.deflate.before.send=false
#http.keep-alive.timeout=5
#http.keep-alive.max=10000
#http.static.pubic.directory=public
#
#http.net.active.connection.limit=256
#http.net.event.capacity=1024
#http.net.io.queue.capacity=1024
#http.net.idle.connection.timeout=300000
#http.net.interest.queue.capacity=1024
#http.net.listen.backlog=256
#http.net.snd.buf.size=2m
#http.net.rcv.buf.size=2m
#
#http.text.date.adapter.pool.capacity=16
#http.text.json.cache.limit=16384
#http.text.json.cache.size=8192
#http.text.max.required.delimiter.stddev=0.1222d
#http.text.max.required.line.length.stddev=0.8
#http.text.metadata.string.pool.capacity=128
#http.text.roll.buffer.limit=8216576
#http.text.roll.buffer.size=1024
#http.text.analysis.max.lines=1000
#http.text.lexer.string.pool.capacity=64
#http.text.timestamp.adapter.pool.capacity=64
#http.text.utf8.sink.size=4096
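#
# Example (sketch; the value is illustrative): let CSV import analyse more lines before deciding on column types
#http.text.analysis.max.lines=5000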
#
#http.json.query.connection.check.frequency=1000000
#http.json.query.float.scale=4
#http.json.query.double.scale=12
#
#http.security.readonly=false
#http.security.max.response.rows=Long.MAX_VALUE
# Switch to turn on terminating SQL processing when the HTTP connection is closed.
# The check affects performance, so the connection is only inspected after http.security.interruptor.iterations.per.check calls to the check method.
# The mechanism also reads \r\n from the input stream and discards it, since some HTTP clients send this as a keep-alive between requests; http.security.interruptor.buffer.size sets the size of the buffer used for this.
#http.security.interrupt.on.closed.connection=true
#http.security.interruptor.iterations.per.check=2000000
#http.security.interruptor.buffer.size=32
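#
# Example (sketch; the row cap is illustrative): serve a read-only HTTP endpoint and limit result sets to 10000 rows
#http.security.readonly=true
#http.security.max.response.rows=10000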
#
########################## CAIRO settings ############################
# directory for storing db tables and metadata. this directory is inside the server root directory provided at startup
#cairo.root=db
# how changes to table are flushed to disk upon commit - default: nosync. Choices: nosync, async (flush call schedules update, returns immediately), sync (waits for flush to complete)
#cairo.commit.mode=nosync
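# Example (sketch): trade some commit throughput for durability by waiting for each flush to complete
#cairo.commit.mode=sync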
# number of times table creation or insertion will be attempted
#cairo.create.as.select.retry.count=5
# type of map used. Options: 1. fast (speed at the expense of storage; this is the default) 2. compact
#cairo.default.map.type=fast
# when true, symbol values will be cached on Java heap
#cairo.default.symbol.cache.flag=false
# when column type is SYMBOL this parameter specifies the approximate capacity of the symbol map.
# It should be equal to the number of unique symbol values stored in the table; getting this
# value badly wrong will cause performance degradation. Must be a power of 2
#cairo.default.symbol.capacity=256
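# Example (sketch, assuming SYMBOL columns with roughly 10000 distinct values): cache symbols on the Java heap and size the map to the next power of 2
#cairo.default.symbol.cache.flag=true
#cairo.default.symbol.capacity=16384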
# number of attempts to open files
#cairo.file.operation.retry.count=30
# how often the writer maintenance job gets run, in milliseconds
#cairo.idle.check.interval=300000
# defines frequency with which the reader pool checks for inactive readers. In milliseconds
#cairo.inactive.reader.ttl=-10000
# defines frequency with which the writer pool checks for inactive writers. In milliseconds
#cairo.inactive.writer.ttl=-10000
# approximation of number of rows for single index key, must be power of 2
#cairo.index.value.block.size=256
# number of attempts to open swap file
#cairo.max.swap.file.count=30
# file permission for new directories
#cairo.mkdir.mode=509
# minimum number of rows before allowing use of parallel indexation
#cairo.parallel.index.threshold=100000
# number of attempts to get TableReader
#cairo.reader.pool.max.segments=5
# timeout when attempting to get BitmapIndexReaders. In microseconds
#cairo.spin.lock.timeout=1000000
# Queries are cached. This property sets the number of rows for the query cache
#cairo.cache.rows=16
# Sets the number of blocks for the query cache
#cairo.cache.blocks=4
# sets size of the CharacterStore
#cairo.character.store.capacity=1024
# Sets size of the CharacterSequence pool
#cairo.character.store.sequence.pool.capacity=64
# sets size of the Column pool in the SqlCompiler
#cairo.column.pool.capacity=4096
# load factor for CompactMaps
#cairo.compact.map.load.factor=0.7
# size of the ExpressionNode pool in SqlCompiler
#cairo.expression.pool.capacity=8192
# load factor for all FastMaps
#cairo.fast.map.load.factor=0.5
# size of the JoinContext pool in SqlCompiler
#cairo.sql.join.context.pool.capacity=64
# size of FloatingSequence pool in GenericLexer
#cairo.lexer.pool.capacity=2048
# sets the Key capacity in FastMap and CompactMap
#cairo.sql.map.key.capacity=2048 * 1024
# number of map resizes in FastMap and CompactMap before a resource limit exception is thrown, each resize doubles the previous size
#cairo.sql.map.max.resizes=2^31
# memory page size for FastMap and CompactMap
#cairo.sql.map.page.size=4m
#memory max pages for CompactMap
#cairo.sql.map.max.pages=2^31
#sets the size of the QueryModel pool in the SqlCompiler
#cairo.model.pool.capacity=1024
# sets the memory page size for storing keys in LongTreeChain
#cairo.sql.sort.key.page.size=4m
# max number of pages for storing keys in LongTreeChain before a resource limit exception is thrown
#cairo.sql.sort.key.max.pages=2^31
# sets the memory page size and max pages for storing values in LongTreeChain
#cairo.sql.sort.light.value.page.size=1048576
#cairo.sql.sort.light.value.max.pages=2^31
# sets the memory page size and max pages of the slave chain in full hash joins
#cairo.sql.hash.join.value.page.size=16777216
#cairo.sql.hash.join.value.max.pages=2^31
# sets the number of rows for LATEST BY
#cairo.sql.latest.by.row.count=1000
# sets the memory page size and max pages of the slave chain in light hash joins
#cairo.sql.hash.join.light.value.page.size=1048576
#cairo.sql.hash.join.light.value.max.pages=2^31
# sets memory page size and max pages of file storing values in SortedRecordCursorFactory
#cairo.sql.sort.value.page.size=16777216
#cairo.sql.sort.value.max.pages=2^31
# latch await timeout in nanos for stealing indexing work from other threads
#cairo.work.steal.timeout.nanos=10000
# whether parallel indexation is allowed. Works in conjunction with cairo.parallel.index.threshold
#cairo.parallel.indexing.enabled=true
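# Example (sketch; the threshold is illustrative): index smaller partitions in parallel as well
#cairo.parallel.indexing.enabled=true
#cairo.parallel.index.threshold=10000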
# memory page size for JoinMetadata file
#cairo.sql.join.metadata.page.size=16384
# number of map resizes in JoinMetadata before a resource limit exception is thrown, each resize doubles the previous size
#cairo.sql.join.metadata.max.resizes=2^31
# size of AnalyticColumn pool in SqlParser
#cairo.sql.analytic.column.pool.capacity=64
# size of CreateTableModel pool in SqlParser
#cairo.sql.create.table.model.pool.capacity=16
# size of ColumnCastModel pool in SqlParser
#cairo.sql.column.cast.model.pool.capacity=16
# size of RenameTableModel pool in SqlParser
#cairo.sql.rename.table.model.pool.capacity=16
# size of WithClauseModel pool in SqlParser
#cairo.sql.with.clause.model.pool.capacity=128
# size of InsertModel pool in SqlParser
#cairo.sql.insert.model.pool.capacity=64
# size of CopyModel pool in SqlParser
#cairo.sql.copy.model.pool.capacity=32
# size of buffer used when copying tables
#cairo.sql.copy.buffer.size=2m
#cairo.sql.double.cast.scale=12
#cairo.sql.float.cast.scale=4
#name of file with user's set of date and timestamp formats
#cairo.sql.copy.formats.file=/text_loader.json
#input root directory for backups
#cairo.sql.copy.root=null
# output root directory for backups
#cairo.sql.backup.root=null
# date format for backup directory
#cairo.sql.backup.dir.datetime.format=yyyy-MM-dd
# name of tmp directory used during backup
#cairo.sql.backup.dir.tmp.name=tmp
# permission used when creating backup directories
#cairo.sql.backup.mkdir.mode=509
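# Example (sketch; the path and hourly format are illustrative): write BACKUP output under a dedicated directory with hour-stamped subdirectories
#cairo.sql.backup.root=/var/lib/questdb/backups
#cairo.sql.backup.dir.datetime.format=yyyy-MM-dd-HH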
#cairo.date.locale=en
#cairo.timestamp.locale=en
######################### LINE UDP settings ###############################
#line.udp.bind.to=0.0.0.0:9009
#line.udp.join=232.1.2.3
#line.udp.commit.rate=1000000
#line.udp.msg.buffer.size=2048
#line.udp.msg.count=10000
#line.udp.receive.buffer.size=8m
#line.udp.enabled=true
#line.udp.own.thread.affinity=-1
#line.udp.own.thread=false
#line.udp.unicast=false
#line.udp.commit.mode
#line.udp.timestamp=n
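# Example (sketch; the port choice is illustrative): receive ILP over unicast UDP on a non-default port using a dedicated network thread
#line.udp.bind.to=0.0.0.0:9109
#line.udp.unicast=true
#line.udp.own.thread=true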
###################### PG Wire settings #############################
#pg.enabled=true
#pg.net.active.connection.limit=10
#pg.net.bind.to=0.0.0.0:8812
#pg.net.event.capacity=1024
#pg.net.io.queue.capacity=1024
#pg.net.idle.timeout=300000
#pg.net.interest.queue.capacity=1024
#pg.net.listen.backlog=50000
#pg.net.recv.buf.size=-1
#pg.net.send.buf.size=-1
#pg.character.store.capacity=4096
#pg.character.store.pool.capacity=64
#pg.connection.pool.capacity=64
#pg.password=quest
#pg.user=admin
#pg.factory.cache.column.count=16
#pg.factory.cache.row.count=16
#pg.idle.recv.count.before.giving.up=10000
#pg.idle.send.count.before.giving.up=10000
#pg.max.blob.size.on.query=512k
#pg.recv.buffer.size=1M
#pg.send.buffer.size=1M
#pg.date.locale=en
#pg.timestamp.locale=en
#pg.worker.count=2
#pg.worker.affinity=-1,-1
#pg.halt.on.error=false
#pg.daemon.pool=true
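# Example (sketch; the credentials are placeholders): bind PG Wire to localhost only and replace the default credentials
#pg.net.bind.to=127.0.0.1:8812
#pg.user=questdb_app
#pg.password=change_me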