# wasp-core_2.11-2.35.0-cdp719 reference.conf
wasp {
  # framework-related configuration
  actor-system-name = "WASP"
  configuration-mode = "legacy" # available values: legacy, local, namespaced://NAMESPACE
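  # Illustrative only: in namespaced mode the namespace name is embedded in the
  # value itself, e.g. (hypothetical namespace name)
  #configuration-mode = "namespaced://wasp-dev"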
  actor-downing-timeout-millis = 10000 # do not change unless you know what you're doing
  environment {
    validationRulesToIgnore = [] # array of validation-rule keys; do not change unless you know what you're doing
    mode = "production" # production, develop
    prefix = "" # must not contain spaces or the '/' and '.' characters
  }
  systempipegraphs.start = true # whether to automatically start system pipegraphs
  systemproducers.start = true # whether to automatically start system producers
  index-rollover = false
  general-timeout-millis = 60000
  services-timeout-millis = 15000
  darwinConnector = "" # possible values: hbase, postgres
  avroSchemaManager {
    wasp-manages-darwin-connectors-conf = true
    darwin {}
  }
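  # A minimal sketch of what the darwin block might contain when
  # darwinConnector = "hbase". The exact keys are defined by the Darwin
  # schema-registry connector in use, so treat these as assumptions:
  #darwin {
  #  namespace = "AVRO"            # HBase namespace (illustrative)
  #  table = "SCHEMA_REPOSITORY"   # HBase table holding the schemas (illustrative)
  #}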
  # for documentation, see
  telemetry {
    writer = "default"
    latency.sample-one-message-every = 100
    topic = {
      name = "telemetry"
      partitions = 3
      replica = 1
      others = [
        {"batch.size" = "1048576"}
        {"acks" = "0"}
      ]
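      # Each jmx entry below presumably selects MBeans matching `query`; the
      # metric group and source id are then read from the named MBean
      # attributes, with the *Fallback values used when an attribute is
      # missing. This reading of the keys is an assumption based on their names.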
      jmx = [
        {
          query = "java.lang:*"
          metricGroupAttribute = "name"
          sourceIdAttribute = "unknown"
          metricGroupFallback = "jvm"
          sourceIdFallback = "jvm"
        }
        {
          query = "kafka.producer:*"
          metricGroupAttribute = "type"
          sourceIdAttribute = "client-id"
        }
        {
          query = "kafka.consumer:*"
          metricGroupAttribute = "type"
          sourceIdAttribute = "client-id"
        }
        {
          query = "\"org.apache.hadoop.hbase.client\":*"
          metricGroupAttribute = "name"
          sourceIdAttribute = "scope"
        }
      ]
    }
  }
  rest {
    server {
      hostname = "localhost"
      port = 2891
      https = {
        active = false
        keystore-location = "/path/to/file"
        password-location = "/path/to/file/with/singleline/pwd"
        keystore-type = "PKCS12"
      }
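      # A minimal sketch of an enabled-HTTPS configuration; the paths are
      # illustrative assumptions, not defaults:
      #https = {
      #  active = true
      #  keystore-location = "/etc/wasp/rest-keystore.p12"
      #  password-location = "/etc/wasp/rest-keystore.password"
      #  keystore-type = "PKCS12"
      #}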
    }
  }
  datastore {
    indexed = "solr"
    keyvalue = "hbase"
  }
  akka {
    loglevel = "DEBUG"
    loggers = ["akka.event.slf4j.Slf4jLogger"]
    logging-filter = "akka.event.slf4j.Slf4jLoggingFilter"
    logger-startup-timeout = 60s
    log-dead-letters = off
    log-dead-letters-during-shutdown = off
    remote {
      log-remote-lifecycle-events = off
      enabled-transports = ["akka.remote.netty.tcp"]
      netty.tcp {
        port = 2892
        hostname = "localhost"
      }
    }
    actor {
      provider = "akka.cluster.ClusterActorRefProvider"
    }
    cluster {
      log-info = on
      seed-nodes = ["akka.tcp://WASP@localhost:2892"]
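      # In a multi-node deployment every node should list the same seed nodes,
      # each matching the actor system name and the netty.tcp port configured
      # above; host names here are illustrative:
      #seed-nodes = ["akka.tcp://WASP@wasp-master-1:2892", "akka.tcp://WASP@wasp-master-2:2892"]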
      gossip-interval = 5s
      publish-stats-interval = 10s
      metrics.gossip-interval = 10s
      metrics.collect-interval = 10s
    }
  }
  # external-services-related configuration
  mongo {
    address = "mongodb://localhost:27017"
    db-name = "wasp"
    username = ""
    password = ""
    timeout = ${wasp.services-timeout-millis}
    collection-prefix = ""
    authentication-db = ${wasp.mongo.db-name}
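    # Illustrative only: a replica-set address with authentication enabled
    # (host names and credentials are assumptions):
    #address = "mongodb://mongo-1:27017,mongo-2:27017,mongo-3:27017/?replicaSet=rs0"
    #username = "wasp"
    #password = "changeme"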
  }
  kafka {
    connections = [{
      protocol = ""
      host = "localhost"
      port = 9092
      timeout = ${wasp.services-timeout-millis}
      metadata = []
    }]
    zookeeperConnections = [{
      protocol = ""
      host = "localhost"
      port = 2181
      timeout = ${wasp.services-timeout-millis}
      metadata = []
    }]
    zkChRoot = "/kafka"
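    # Following standard ZooKeeper chroot semantics, the chroot is presumably
    # appended to the connection above, giving an effective connect string of
    # "localhost:2181/kafka".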
    ingest-rate = "1s"
    broker-id = 0
    partitioner-fqcn = "org.apache.kafka.clients.producer.internals.DefaultPartitioner"
    default-encoder = "kafka.serializer.DefaultEncoder"
    key-encoder-fqcn = "org.apache.kafka.common.serialization.ByteArraySerializer"
    encoder-fqcn = "org.apache.kafka.common.serialization.ByteArraySerializer"
    decoder-fqcn = "org.apache.kafka.common.serialization.ByteArrayDeserializer"
    batch-send-size = 0
    acks = -1
    use-new-client = false
    #others = [
    #  # mandatory
    #  { "security.protocol" = "SASL_PLAINTEXT" }
    #  { "sasl.kerberos.service.name" = "kafka" }
    #  { "sasl.jaas.config" = "com.sun.security.auth.module.Krb5LoginModule required storeKey=true useKeyTab=true useTicketCache=false keyTab=\"./wasp2.keytab\" serviceName=\"kafka\" principal=\"wasp2@REALM\";" }
    #  { "sasl.mechanism" = "GSSAPI" }
    #  { "kafka.security.protocol" = "SASL_PLAINTEXT" }
    #  { "kafka.sasl.kerberos.service.name" = "kafka" }
    #  { "kafka.sasl.jaas.config" = "com.sun.security.auth.module.Krb5LoginModule required storeKey=true useKeyTab=true useTicketCache=false keyTab=\"./wasp2.keytab\" serviceName=\"kafka\" principal=\"wasp2@REALM\";" }
    #  { "kafka.sasl.mechanism" = "GSSAPI" }
    #
    #  # optional
    #  { "sasl.kerberos.kinit.cmd" = "/usr/bin/kinit" }
    #  { "sasl.kerberos.min.time.before.relogin" = "60000" }
    #  { "sasl.kerberos.ticket.renew.jitter" = "0.05" }
    #  { "sasl.kerberos.ticket.renew.window.factor" = "0.8" }
    #  { "kafka.sasl.kerberos.kinit.cmd" = "/usr/bin/kinit" }
    #  { "kafka.sasl.kerberos.min.time.before.relogin" = "60000" }
    #  { "kafka.sasl.kerberos.ticket.renew.jitter" = "0.05" }
    #  { "kafka.sasl.kerberos.ticket.renew.window.factor" = "0.8" }
    #]
  }
  additional-kafka-clusters {
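    # Empty by default. A sketch of a named secondary cluster, assuming each
    # entry mirrors the wasp.kafka block above (the key names, host, and the
    # mirrored schema are assumptions, not a documented contract):
    #secondary {
    #  connections = [{
    #    protocol = ""
    #    host = "kafka-secondary"
    #    port = 9092
    #    timeout = ${wasp.services-timeout-millis}
    #    metadata = []
    #  }]
    #}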
  }
  spark-streaming {
    app-name = "WASP-streaming"
    master {
      protocol = ""
      host = "local[*]"
      port = 0
    }
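    # The master URL is presumably assembled from protocol/host/port; with the
    # defaults above it stays "local[*]" (local mode). A Spark Standalone
    # master would look like this (host name is illustrative):
    #master {
    #  protocol = "spark"
    #  host = "spark-master"
    #  port = 7077
    #}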
    driver-conf {
      submit-deploy-mode = "client"
      driver-cores = 1
      driver-memory = "1G"
      driver-hostname = "localhost"
      driver-bind-address = "0.0.0.0"
      driver-port = 0
      # enables a watchdog that kills the driver process if the Spark context
      # stops and cannot be recovered
      kill-driver-process-if-spark-context-stops = false
    }
    executor-cores = 2
    executor-memory = "1G"
    cores-max = 2 # used by the Spark Standalone cluster manager (otherwise all available cores are assigned to the first application)
    executor-instances = 1 # used only by the Hadoop YARN cluster manager
    additional-jars-path = "/root/wasp/lib"
    yarn-jar = ""
    block-manager-port = 0
    broadcast-port = 0
    fileserver-port = 0
    retained-stages-jobs = 100
    retained-tasks = 5000
    retained-jobs = 100
    retained-executions = 100
    retained-batches = 100
    enable-hive-support = false
    kryo-serializer {
      enabled = true
      registrators = "" # comma-separated list of fully qualified class names
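      # e.g. registrators = "com.example.MyKryoRegistrator" (class name is
      # illustrative; it should point at a custom Spark KryoRegistrator)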
      strict = false
    }
    scheduling-strategy {
      class-name = "it.agilelab.bigdata.wasp.consumers.spark.streaming.actor.master.FifoSchedulingStrategyFactory"
    }
    streaming-batch-interval-ms = 1000
    # local develop: "file:///checkpoint" (local path inside the consumers-spark-streaming container); production: "/checkpoint" (HDFS path)
    checkpoint-dir = "/checkpoint"
    #nifi-stateless {
    #  bootstrap = "hdfs:/path/to/bootstrap/jars"
    #  system = "hdfs:/path/to/system/jars"
    #  stateless = "hdfs:/path/to/stateless/jars"
    #  extensions = "hdfs:/path/to/extensions/jars"
    #}
    #trigger-interval-ms = 200
    #others = [
    #  { "spark.yarn.dist.files" = "file:///root/configurations/wasp2.keytab,file:///root/configurations/sasl.jaas.config" }
    #  { "spark.executor.extraJavaOptions" = "-Djava.security.auth.login.config=./sasl.jaas.config" }
    #  { "spark.authenticate" = "true" }
    #]
  }
  spark-batch {
    app-name = "WASP-batch"
    master {
      protocol = ""
      host = "local[*]"
      port = 0
    }
    driver-conf {
      submit-deploy-mode = "client"
      driver-cores = 1
      driver-memory = "1G"
      driver-hostname = "localhost"
      driver-bind-address = "0.0.0.0"
      driver-port = 0
      # enables a watchdog that kills the driver process if the Spark context
      # stops and cannot be recovered
      kill-driver-process-if-spark-context-stops = false
    }
    executor-cores = 2
    executor-memory = "1G"
    cores-max = 2 # used by the Spark Standalone cluster manager (otherwise all available cores are assigned to the first application)
    executor-instances = 1 # used only by the Hadoop YARN cluster manager
    additional-jars-path = "/root/wasp/lib"
    yarn-jar = ""
    block-manager-port = 0
    broadcast-port = 0
    fileserver-port = 0
    retained-stages-jobs = 100
    retained-tasks = 5000
    retained-jobs = 100
    retained-executions = 100
    retained-batches = 100
    kryo-serializer {
      enabled = true
      registrators = "" # comma-separated list of fully qualified custom KryoRegistrator class names
      strict = false
    }
    #others = [
    #  { "spark.yarn.dist.files" = "file:///root/configurations/wasp2.keytab,file:///root/configurations/sasl.jaas.config" }
    #  { "spark.executor.extraJavaOptions" = "-Djava.security.auth.login.config=./sasl.jaas.config" }
    #  { "spark.authenticate" = "true" }
    #]
  }
  elastic {
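    # Two connection flavors are declared below; judging by the standard
    # Elasticsearch ports, "binary" (9300) is the transport protocol and
    # "rest" (9200) is the HTTP/REST API.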
    connections = [
      {
        protocol = ""
        host = "localhost"
        port = 9300
        timeout = ${wasp.services-timeout-millis}
        metadata = [
          {"connectiontype" = "binary"}
        ]
      },
      {
        protocol = ""
        host = "localhost"
        port = 9200
        timeout = ${wasp.services-timeout-millis}
        metadata = [
          {"connectiontype" = "rest"}
        ]
      }
    ]
  }
  solrcloud {
    zookeeperConnections = [{
      protocol = ""
      host = "localhost"
      port = 2181
      timeout = ${wasp.services-timeout-millis}
      metadata = []
    }]
    zkChRoot = "/solr"
  }
  hbase {
    core-site-xml-path = "/etc/hadoop/conf/core-site.xml"
    hbase-site-xml-path = "/etc/hadoop/conf/hbase-site.xml"
  }
  jdbc {
    connections {
      #CONNECTION_NAME {
      #  url = "jdbc:oracle:thin:@//HOST:PORT/DB"
      #  user = "USER"
      #  password = "PSW"
      #  driverName = "DRIVER_FULLY_QUALIFIED_NAME"
      #}
      #
      # # Examples
      #mysql {
      #  url = "jdbc:mysql://mysql:3306/test_db"
      #  user = "root"
      #  password = "psw"
      #  driverName = "com.mysql.jdbc.Driver"
      #}
      #oracle {
      #  url = "jdbc:oracle:thin:@//oracl:1521/ORCLCDB.localdomain"
      #  user = "user"
      #  password = "psw"
      #  driverName = "oracle.jdbc.driver.OracleDriver"
      #}
    }
  }
  nifi {
    base-url = "http://localhost:8080"
    api-path = "nifi-api"
    ui-path = "nifi"
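    # Presumably these combine as {base-url}/{api-path} and {base-url}/{ui-path},
    # i.e. http://localhost:8080/nifi-api and http://localhost:8080/nifi.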
  }
  compiler {
    max-instances = 2
  }
}