All Downloads are FREE. Search and download functionalities are using the official Maven repository.

.splout-server.0.3.0.source-code.splout.properties.default Maven / Gradle / Ivy

Go to download

Splout SQL is a read only, horizontally scalable and partitioned SQL database that plays well with Hadoop.

The newest version!
#
# QNode properties
#

# The port this QNode will run on

qnode.port	4412

#
# Number of Thrift connections to each DNode allocated as a connection pool, in each QNode.
# This is the expected maximum number of parallel requests to each QNode.
#

qnode.dnode.pool.size	40

# Whether this QNode should find the next available port 
# in case "dnode.port" is busy or fail otherwise.

qnode.port.autoincrement	true

# The host this QNode will run on.
# Localhost will be substituted by the first valid private IP address.

qnode.host	localhost

# The number of successfully deployed versions that 
# will be kept in the system (per tablespace)

qnode.versions.per.tablespace	10

# The number of seconds to wait before checking each time
# if a DNode has failed or if a timeout has occurred in the middle of a deploy

qnode.deploy.seconds.to.check.error	60

# A fixed amount of time (seconds) that this QNode will wait before taking certain actions. For example, a QNode may decide to
# re-balance under-replicated partitions. But if we are at cluster start, some DNodes may still be connecting or are
# to connect in the near future. For that it is useful to wait this "warming time" before deciding such things.

qnode.warming.time	60

# Whether or not to enable automatic replica balancing.
# If automatic replica balancing is enabled, the system will become highly available so that
# if one replica is down, it will copy other replicas to another DNode so that the affected
# partitions don't remain under-replicated. As a downside, replica balancing might use more disk space than desired.

qnode.enable.replica.balance	false

# The TTL (in seconds) of balance actions: if they take more than this number of seconds they will be evicted.
# This is necessary because there are a number of race conditions that might block certain actions (i.e. final DNode
# going down in the middle of the transfer). Default is 10 hours.

qnode.balance.actions.ttl	36000

#
# DNode properties
#

# This DNode's port

dnode.port	4422

# Whether this DNode should find the next available port in case "dnode.port" is busy or fail otherwise

dnode.port.autoincrement	true

# How many threads will be allocated for serving requests in Thrift's THsHaServer.
# Usually this will be a factor of the number of cores in the machine.

dnode.serving.threads	10

# This DNode's host name.
# Localhost will be substituted by the first valid private IP address.

dnode.host	localhost

# The data folder that will be used for storing deployed SQL data stores

dnode.data.folder	./dnode-staging

# The amount of seconds that the DNode will cache SQL connection pools. After that time, it will close them.
# Remember that the DNode may receive requests for different versions in the middle of a deployment,
# so that's why we want to expire connection pools after some time (to not cache connection pools that will not be used anymore).
	 
dnode.pool.cache.seconds	3600

# Number of SQL connection pools that will be cached. There will be one SQL connection pool for each tablespace,
# version and partition that this DNode serves. So this number must not be smaller than the different numbers of
# tablespace + version + partitions.

dnode.pool.cache.n.elements	128

# The amount of seconds that the DNode will wait before canceling a too-long deployment
# Default is 10 hours

dnode.deploy.timeout.seconds	36000

#  A hard limit on the number of results per each SQL query that this DNode may send back to QNodes

dnode.max.results.per.query	50000

# If set, this DNode will listen for test commands. This property is used to activate
# responsiveness to some commands that are useful for integration testing: making a DNode 
# shutdown, etc.
 
dnode.handle.test.commands	false

# Queries that run for more than this time will be interrupted. Must be greater than 1000.

dnode.max.query.time	15000

#
# In milliseconds, queries that are slower will be logged with a WARNING. 
#
	 
dnode.slow.query.abs.limit	2500

# The amount of parallel downloads that are allowed per each deployment

dnode.deploy.parallelism  3

#
# DNode Data Fetcher Properties
# (The DNode Data Fetcher is in charge of downloading the files under a new deploy request)
#

# If using S3 fetching, specify here your AWS credentials.
# Uncomment when needed.

#fetcher.s3.access.key	ACCESSKEY
#fetcher.s3.secret.key	SECRETKEY

# The local folder that will be used to download new deployments
# This folder is also used by the HTTP File exchanger module
fetcher.temp.dir	fetcher-tmp

# The size in bytes of the in-memory buffer used to download files 
# This property is the same for the HTTP File exchanger module
fetcher.download.buffer	1048576

# (Deprecated) Whether to use throttling (> 0) or not. If so, number of bytes per second to throttle.
fetcher.bytes.per.sec.throttle	-1

# Every time this number of bytes has been fetched from a file, a report will be made so that
# the DNode can calculate the transfer speed and put the info into Hazelcast.
# This shouldn't be too low - otherwise it would overload the network!
# Leave the default value in case of doubt.
fetcher.bytes.to.report.progress	268435456

#
# If using Hadoop fetching, the address of the NameNode for 
# being able to download data from HDFS. Uncomment when needed.
#
#fetcher.hadoop.fs.name=...
 
#
# DNode HTTP File exchanger Properties.
# (The HTTP File exchanger sends binary files between DNodes for auto-balancing under-replicated partitions).
#

# The number of serving threads of the HTTP server / client (maximum files that can be received / transferred in parallel).
http.exchanger.threads.server=2
http.exchanger.threads.client=2

# The allowed maximum backlog for the HTTP server.
http.exchanger.backlog=0

# The port where the HTTP file exchanger will bind to.
http.exchanger.port=4432

# Whether or not to use auto-increment for the HTTP port in case it is already busy 
http.exchanger.port.auto.increment=true

#
# Hazelcast properties
#

# Folder to be used to persist Hazelcast state information
# Needed to persist current version information.
# If not present, no information is stored

hz.persistent.data.folder=hz-data

# Enable this property if you want your service to bind to a specific port. Otherwise the default Hazelcast port is used (5701), and auto-incremented if needed.

#hz.port=XXXX

# Use this property to configure Hazelcast join in one or other way.
# Possible values: MULTICAST, TCP, AWS

hz.join.method=multicast

# Uncomment and use these properties if method=MULTICAST and fine tuning is needed:

#hz.multicast.group=
#hz.multicast.port=

# Uncomment and use this property if method=TCP:

#hz.tcp.cluster=1.1.1.1,2.2.2.2

# Uncomment and use this property if method=AWS and only a certain security group is to be examined:

#hz.aws.security.group=MySecurityGroup

# Also don't forget your AWS credentials if you use method=AWS:

#hz.aws.key=MYKEY
#hz.aws.secret=MYSECRET

# Modifies the standard backup count. Affects the replication factor of distributed maps.

hz.backup.count=3

# Hazelcast waits 5 seconds before joining a member. That is good in production
# because it improves the possibilities of joining several members at the same time.
# But very bad for testing... This property allows you to disable it for testing.

hz.disable.wait.when.joining=false

# Number of the oldest members leading operations in the cluster. 
# Sometimes only these members answer to events, in order to reduce
# coordination traffic.
 
hz.oldest.members.leading.count=3

# Max time, in minutes, to check if the member is registered. This check is used
# to assure eventual consistency in rare cases of network partitions where replication
# was not enough to ensure that no data is lost.
   
hz.registry.max.time.to.check.registration=5




© 2015 - 2025 Weber Informatics LLC | Privacy Policy