# NLPCraft 0.7.1 - application.conf
# An API to convert natural language into actions.
#
# "Commons Clause" License, https://commonsclause.com/
#
# The Software is provided to you by the Licensor under the License,
# as defined below, subject to the following condition.
#
# Without limiting other conditions in the License, the grant of rights
# under the License will not include, and the License does not grant to
# you, the right to Sell the Software.
#
# For purposes of the foregoing, "Sell" means practicing any or all of
# the rights granted to you under the License to provide to third parties,
# for a fee or other consideration (including without limitation fees for
# hosting or consulting/support services related to the Software), a
# product or service whose value derives, entirely or substantially, from
# the functionality of the Software. Any license notice or attribution
# required by the License must also include this Commons Clause License
# Condition notice.
#
# Software: NLPCraft
# License: Apache 2.0, https://www.apache.org/licenses/LICENSE-2.0
# Licensor: Copyright (C) 2018 DataLingvo, Inc. https://www.datalingvo.com
#
# _ ____ ______ ______
# / | / / /___ / ____/________ _/ __/ /_
# / |/ / / __ \/ / / ___/ __ `/ /_/ __/
# / /| / / /_/ / /___/ / / /_/ / __/ /_
# /_/ |_/_/ .___/\____/_/ \__,_/_/ \__/
# /_/
#
# REST server configuration root.
server {
# Apache Ignite database, which is used by default, doesn't require configuration.
# MySql, Postgres and Oracle databases can be configured instead.
database {
# jdbc {
# # MySQL URL and driver.
# # Note that MySQL driver dependencies should be added to the project,
# # and scripts sql/mysql should be executed.
# # Tested under MySQL 5.7.17.
# url = "jdbc:mysql://localhost:3306/nlpcraft"
# driver = com.mysql.jdbc.Driver
# username =
# password =
# }
# jdbc {
# # Postgres URL and driver.
# # Note that Postgres driver dependencies should be added to the project,
# # and scripts sql/postgres should be executed.
# # Tested under Postgres 9.6.9.
# url = "jdbc:postgresql://localhost:5432/nlpcraft"
# driver = org.postgresql.Driver
# username = nlpcraft
# password = nlpcraft
# }
# jdbc {
# # Oracle URL and driver.
# # Note that Oracle driver dependencies should be added to the project,
# # and scripts sql/oracle should be executed.
# # Tested under Oracle 11G (XE).
# url = "jdbc:oracle:thin:@localhost:1521:XE"
# driver = oracle.jdbc.driver.OracleDriver
# username = nlpcraft
# password = nlpcraft
# }
# Apache Ignite In-Memory Computing Platform (persistence).
jdbc {
# Ignite JDBC URL and driver.
url = "jdbc:ignite:thin://127.0.0.1/nlpcraft"
driver = org.apache.ignite.IgniteJdbcThinDriver
# username =
# password =
}
# Common JDBC connection pool for any supported database.
c3p0 {
maxStatements = 180
pool {
initSize = 10
minSize = 1
maxSize = 50
acquireIncrement = 2
}
}
}
# REST server configuration.
rest {
host = localhost
port = 8081
}
# User manager configuration.
user {
pwdPoolBlowup = 3
timeoutScannerFreqMins = 1
accessTokenExpireTimeoutMins = 60
}
# Plugin manager configuration.
plugins {
notification = "org.nlpcraft.server.notification.plugins.noop.NCNoopNotificationPlugin"
probe {
auth = "org.nlpcraft.server.probe.plugins.auth.basic.NCBasicProbeAuthenticationPlugin"
}
}
# 'NCRestPushNotificationPlugin' configuration.
# org.nlpcraft.server.notification.plugins.restpush.NCRestPushNotificationPlugin {
# flushSecs = 5
# maxBufferSize = 10000
# batchSize = 50
# endpoints = []
# }
# 'NCBasicProbeAuthenticationPlugin' configuration.
org.nlpcraft.server.probe.plugins.auth.basic.NCBasicProbeAuthenticationPlugin {
# NOTE: this probe token should match the probe token in the probe.
probe.token = "3141592653589793" # This is a default probe token (should be same in probe.conf files).
}
# Probe manager configuration.
probe {
links {
# Default endpoints.
#
# NOTES:
# ------
# (1) If changed - they need to be changed on both server and probes.
# (2) Don't use 'localhost' if server and probe(s) are on different hosts.
# This property can be overridden with system property
# or environment variable 'NLPCRAFT_SERVER_PROBE_UPLINK'.
upLink = "localhost:8201" # Server to probe data pipe.
# This property can be overridden with system property
# or environment variable 'NLPCRAFT_SERVER_PROBE_DOWNLINK'.
downLink = "localhost:8202" # Probe to server data pipe.
}
pingTimeoutMs = 2000
soTimeoutMs = 5000
reconnectTimeoutMs = 5000
poolSize = 100
}
# Endpoint manager configuration.
endpoint {
queue {
# Maximum queue size for all users.
maxSize = 100000
# Maximum queue size configuration for each user.
maxPerUserSize = 1000
# Period for queue size limit check.
checkPeriodMins = 5
}
# Sending delays (attempt by attempt) if previous sending was not successful.
    # Fibonacci numbers. The last delay is used repeatedly until `lifetimeMins` expires.
delaysSecs = [1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144]
    # Lifetime (TTL). How long delivery of the message will be retried (1 day).
lifetimeMins = 1440
}
# Default date formatting for 'nlpcraft:date' token detection only.
# Supported formats: MDY, DMY, YMD.
datesFormatStyle = MDY
# Enabled built-in token providers (each token represents a named entity category).
# User models can only use built-in tokens from the token providers configured here.
#
# Supported values:
# * 'nlpcraft' - Built-in NLPCraft tokens. Token IDs start with 'nlpcraft:'.
# * 'opennlp' - Apache OpenNLP (https://opennlp.apache.org). Token IDs start with 'opennlp:'.
# * 'stanford' - Stanford CoreNLP (https://stanfordnlp.github.io/CoreNLP). Token IDs start with 'stanford:'.
# * 'google' - Google Natural Language (https://cloud.google.com/natural-language). Token IDs start with 'google:'.
# * 'spacy' - Python NLP Library (https://spacy.io). Token IDs start with 'spacy:'.
#
# DO NOT confuse these token providers with the underlying NLP engine ('opennlp' or 'stanford')
# that is configured separately on both REST server and data probe. NLP engine is used only for the
# basic NLP processing such as tokenization, lemmatization, etc. NLP engines and supported token providers
# can be mixed and matched, i.e. 'stanford' NLP engine can be used with 'google' and 'opennlp' token providers.
#
# See Integrations section (https://nlpcraft.org/integrations.html) for details on how to
# configure 3rd party token providers.
tokenProviders = [
nlpcraft # By default - only NLPCraft tokens are enabled and can be used by the user data models.
]
}
# Akka configuration.
akka.http.server.remote-address-header=on
# © 2015 - 2025 Weber Informatics LLC | Privacy Policy