#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
kylin.metadata.url=${KYLIN_METADATA_URL}
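# Illustrative only: the metadata URL is an identifier followed by JDBC connection
# properties; the hostname and credentials below are hypothetical.
#kylin.metadata.url=kylin_metadata@jdbc,driverClassName=org.postgresql.Driver,url=jdbc:postgresql://localhost:5432/kylin,username=postgres,password=kylin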
kylin.metadata.audit-log.max-size=3000000
kylin.metadata.ops-cron=0 0 0 * * *
kylin.metadata.history-source-usage-cron=0 0 0 * * *
kylin.metadata.history-source-usage-unwrap-computed-column=true
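# The cron settings above use the 6-field Spring cron format
# (second minute hour day-of-month month day-of-week); "0 0 0 * * *" fires daily at midnight.
# Illustrative alternative (hypothetical schedule, daily at 02:30):
#kylin.metadata.ops-cron=0 30 2 * * *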
# Working directory in HDFS; prefer a fully qualified absolute path, and make sure the user has the proper permissions on this directory
kylin.env.hdfs-working-dir=${KYLIN_ENV_HDFS_WORKING_DIR}
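# Illustrative fully qualified path (hypothetical nameservice):
#kylin.env.hdfs-working-dir=hdfs://nameservice1/kylin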
# The build engine writes data to this file system, which by default is the same as defaultFS in core-site.xml
#kylin.env.engine-write-fs=
# ZooKeeper is used for distributed locking, service discovery, leader election, etc.
# example: kylin.env.zookeeper-connect-string=10.1.2.1:2181,10.1.2.2:2181,10.1.2.3:2181
kylin.env.zookeeper-connect-string=${KYLIN_ENV_ZOOKEEPER_CONNECT_STRING}
kylin.env.channel=cloud
# settings for InfluxDB (monitor metrics storage)
kylin.monitor.enabled=false
kylin.influxdb.address=localhost:8086
kylin.influxdb.username=root
kylin.influxdb.password=ENC('NyiG//i7loPFfdUoO9BFuA==')
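# The ENC('...') wrapper above denotes an encrypted credential rather than a plaintext password.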
kylin.influxdb.https.enabled=false
# daily operations
kylin.garbage.storage.executable-survival-time-threshold=30d
# ==================== SERVER & WEB ====================
# Kylin server mode, valid values: [all, query, job]
kylin.server.mode=all
# KE server address; in production it is best to use an HA domain name. Used in yarn-cluster mode to report job info back to KE.
# e.g. kylin.server.address=10.1.2.30:7070
# Kylin server port
server.port=7070
# Kylin HTTPS
kylin.server.https.port=7443
kylin.server.https.keystore-type=JKS
kylin.server.https.keystore-file=${KYLIN_HOME}/server/.keystore
kylin.server.https.keystore-password=changeit
# Set to true to enable CORS for all origins
kylin.server.cors.allow-all=false
# ==================== JOB EXECUTION ====================
### Spark conf overrides for the build engine
kylin.engine.spark-conf.spark.yarn.submit.file.replication=1
kylin.engine.spark-conf.spark.yarn.queue=default
kylin.engine.spark-conf.spark.driver.host=${HOST_IP}
kylin.engine.spark-conf.spark.driver.memoryOverhead=256
#kylin.engine.spark-conf.spark.driver.memory=512m
kylin.engine.spark-conf.spark.driver.cores=1
kylin.engine.spark-conf.spark.storage.memoryFraction=0.3
kylin.engine.spark-conf.spark.eventLog.enabled=true
kylin.engine.spark-conf.spark.port.maxRetries=128
kylin.engine.spark-conf.spark.history.fs.logDirectory=${kylin.env.hdfs-working-dir}/spark-history
kylin.engine.spark-conf.spark.eventLog.dir=${kylin.env.hdfs-working-dir}/spark-history
kylin.engine.spark-conf.spark.driver.extraJavaOptions=-Dfile.encoding=UTF-8 -Dhdp.version=current -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=${KYLIN_HOME}/logs
kylin.engine.spark-conf.spark.yarn.am.extraJavaOptions=-Dhdp.version=current -Dlog4j.configurationFile=spark-appmaster-log4j.xml
kylin.engine.spark-conf.spark.executor.extraJavaOptions=-Dfile.encoding=UTF-8 -Dhdp.version=current -Dlog4j.configurationFile=spark-executor-log4j.xml -Dkylin.hdfs.working.dir=${kylin.env.hdfs-working-dir} -Dkap.metadata.identifier=${kylin.metadata.url.identifier} -Dkap.spark.category=job -Dkap.spark.project=${job.project} -Dkap.spark.identifier=${job.id} -Dkap.spark.jobName=${job.stepId} -Duser.timezone=${user.timezone} -Dkap.spark.mountDir=${job.mountDir}
kylin.engine.spark-conf.spark.yarn.dist.files=${kylin.log.spark-executor-properties-file},${kylin.log.spark-appmaster-properties-file}
kylin.engine.spark-conf.spark.hadoop.yarn.timeline-service.enabled=false
kylin.engine.spark-conf.spark.hadoop.hive.exec.scratchdir=${kylin.env.hdfs-working-dir}/hive-scratch
kylin.engine.spark-conf.spark.hadoop.mapreduce.input.fileinputformat.input.dir.recursive=true
kylin.engine.spark-conf.spark.sql.adaptive.enabled=true
kylin.engine.spark-conf.spark.sql.hive.caseSensitiveInferenceMode=NEVER_INFER
kylin.engine.spark-conf.spark.sql.broadcastTimeout=999999
kylin.engine.spark-conf.spark.sql.cbo.enabled=true
kylin.engine.spark-conf.spark.sql.crossJoin.enabled=true
kylin.engine.spark-conf.spark.sql.sources.bucketing.enabled=false
kylin.engine.driver-memory-strategy=2,20,100
kylin.engine.spark-conf.spark.yarn.stagingDir=${kylin.env.hdfs-working-dir}
#kylin.engine.spark-conf.spark.master=yarn
kylin.engine.spark-conf.spark.submit.deployMode=client
kylin.engine.spark-conf.spark.sql.hive.metastore.version=2.3.9
kylin.engine.spark-conf.spark.sql.hive.metastore.jars=${KYLIN_HOME}/spark/jars/*
kylin.engine.spark-conf.spark.stage.maxConsecutiveAttempts=1
# Spark 3 legacy configs after the Proleptic Gregorian calendar switch
kylin.engine.spark-conf.spark.sql.legacy.parquet.int96RebaseModeInWrite=LEGACY
kylin.engine.spark-conf.spark.sql.legacy.parquet.datetimeRebaseModeInWrite=LEGACY
kylin.engine.spark-conf.spark.sql.legacy.parquet.int96RebaseModeInRead=LEGACY
kylin.engine.spark-conf.spark.sql.legacy.parquet.datetimeRebaseModeInRead=LEGACY
kylin.engine.spark-conf.spark.sql.legacy.avro.datetimeRebaseModeInWrite=LEGACY
kylin.engine.spark-conf.spark.sql.legacy.avro.datetimeRebaseModeInRead=LEGACY
kylin.engine.spark-conf.spark.sql.legacy.timeParserPolicy=LEGACY
kylin.engine.spark-conf.spark.sql.parquet.enableVectorizedReader=false
# disable the Spark 3 dynamic partition pruning (DPP) feature
kylin.engine.spark-conf.spark.sql.optimizer.dynamicPartitionPruning.enabled=false
# ==================== QUERY SPARK CONTEXT & COLUMNAR STORAGE ====================
kylin.storage.quota-in-giga-bytes=10240
kylin.storage.columnar.shard-size-mb=256
# for any spark config entry in http://spark.apache.org/docs/latest/configuration.html#environment-variables, prefix it with "kylin.storage.columnar.spark-env." and append it here
### Path to the Hadoop conf directory; must be local on the server running kylin.sh. The value is set to ${kylin_hadoop_conf_dir}, which is populated by the bootstrap scripts.
### In most cases it can be auto-detected (a typical value is /etc/hadoop/conf), so normal users do not need to worry about this config.
kylin.storage.columnar.spark-env.HADOOP_CONF_DIR=${kylin_hadoop_conf_dir}
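# Illustrative only: any other Spark environment variable can be passed the same way, e.g.
#kylin.storage.columnar.spark-env.YARN_CONF_DIR=/etc/hadoop/conf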
# for any spark config entry in http://spark.apache.org/docs/latest/configuration.html#spark-properties, prefix it with "kylin.storage.columnar.spark-conf." and append it here
kylin.storage.columnar.spark-conf.spark.executor.extraJavaOptions=-Dhdp.version=current -Dlog4j.configurationFile=spark-executor-log4j.xml -Dkylin.hdfs.working.dir=${kylin.env.hdfs-working-dir} -Dkap.metadata.identifier=${kylin.metadata.url.identifier} -Dkap.spark.category=sparder -Dkap.spark.project=${job.project} -Dkap.spark.mountDir=${kylin.tool.mount-spark-log-dir} -XX:MaxDirectMemorySize=896M
kylin.storage.columnar.spark-conf.spark.yarn.am.extraJavaOptions=-Dhdp.version=current -Dlog4j.configurationFile=spark-appmaster-log4j.xml
kylin.storage.columnar.spark-conf.spark.driver.extraJavaOptions=-Dhdp.version=current
#kylin.storage.columnar.spark-conf.spark.serializer=org.apache.spark.serializer.JavaSerializer
kylin.storage.columnar.spark-conf.spark.port.maxRetries=128
kylin.storage.columnar.spark-conf.spark.driver.host=${HOST_IP}
kylin.storage.columnar.spark-conf.spark.driver.memory=4096m
# to avoid sparder driver OOM when decoding data (must not exceed the `spark.driver.memory` setting)
kylin.storage.columnar.spark-conf.spark.sql.driver.maxCollectSize=3600m
kylin.storage.columnar.spark-conf.spark.executor.memory=1024m
kylin.storage.columnar.spark-conf.spark.executor.memoryOverhead=512m
kylin.storage.columnar.spark-conf.spark.yarn.am.memory=1024m
kylin.storage.columnar.spark-conf.spark.executor.cores=1
kylin.storage.columnar.spark-conf.spark.executor.instances=1
kylin.storage.columnar.spark-conf.spark.task.maxFailures=1
kylin.storage.columnar.spark-conf.spark.ui.port=4041
kylin.storage.columnar.spark-conf.spark.locality.wait=0s
kylin.storage.columnar.spark-conf.spark.sql.dialect=hiveql
# to reduce Spark plan optimization time
kylin.storage.columnar.spark-conf.spark.sql.constraintPropagation.enabled=false
# to avoid OOM under highly concurrent query load
kylin.storage.columnar.spark-conf.spark.ui.retainedStages=300
kylin.storage.columnar.spark-conf.spark.hadoop.yarn.timeline-service.enabled=false
kylin.storage.columnar.spark-conf.spark.hadoop.hive.exec.scratchdir=${kylin.env.hdfs-working-dir}/hive-scratch
kylin.storage.columnar.spark-conf.spark.hadoop.mapreduce.input.fileinputformat.input.dir.recursive=true
kylin.storage.columnar.spark-conf.hive.execution.engine=MR
kylin.storage.columnar.spark-conf.spark.sql.crossJoin.enabled=true
kylin.storage.columnar.spark-conf.spark.broadcast.autoClean.enabled=true
kylin.storage.columnar.spark-conf.spark.sql.adaptive.enabled=false
kylin.storage.columnar.spark-conf.spark.sql.objectHashAggregate.sortBased.fallbackThreshold=1
kylin.storage.columnar.spark-conf.spark.sql.hive.caseSensitiveInferenceMode=NEVER_INFER
kylin.storage.columnar.spark-conf.spark.sql.sources.bucketing.enabled=false
kylin.storage.columnar.spark-conf.spark.yarn.stagingDir=${kylin.env.hdfs-working-dir}
kylin.storage.columnar.spark-conf.spark.eventLog.enabled=true
kylin.storage.columnar.spark-conf.spark.history.fs.logDirectory=${kylin.env.hdfs-working-dir}/sparder-history
kylin.storage.columnar.spark-conf.spark.eventLog.dir=${kylin.env.hdfs-working-dir}/sparder-history
kylin.storage.columnar.spark-conf.spark.eventLog.rolling.enabled=true
kylin.storage.columnar.spark-conf.spark.eventLog.rolling.maxFileSize=100m
kylin.storage.columnar.spark-conf.spark.sql.hive.metastore.version=2.3.9
kylin.storage.columnar.spark-conf.spark.sql.hive.metastore.jars=${KYLIN_HOME}/spark/jars/*
# to avoid OOM on cartesian partitions; set to -1 or leave empty to turn this off
kylin.storage.columnar.spark-conf.spark.sql.cartesianPartitionNumThreshold=-1
# disable the Parquet column index to save the overhead of reading it,
# as the column index is not used by the Spark vectorized reader for now
kylin.storage.columnar.spark-conf.parquet.filter.columnindex.enabled=false
# Spark 3 legacy configs after the Proleptic Gregorian calendar switch
kylin.storage.columnar.spark-conf.spark.sql.legacy.parquet.int96RebaseModeInWrite=LEGACY
kylin.storage.columnar.spark-conf.spark.sql.legacy.parquet.datetimeRebaseModeInWrite=LEGACY
kylin.storage.columnar.spark-conf.spark.sql.legacy.parquet.int96RebaseModeInRead=LEGACY
kylin.storage.columnar.spark-conf.spark.sql.legacy.parquet.datetimeRebaseModeInRead=LEGACY
kylin.storage.columnar.spark-conf.spark.sql.legacy.avro.datetimeRebaseModeInWrite=LEGACY
kylin.storage.columnar.spark-conf.spark.sql.legacy.avro.datetimeRebaseModeInRead=LEGACY
kylin.storage.columnar.spark-conf.spark.sql.legacy.timeParserPolicy=LEGACY
# disable the Spark 3 dynamic partition pruning (DPP) feature
kylin.storage.columnar.spark-conf.spark.sql.optimizer.dynamicPartitionPruning.enabled=false
# ==================== JOB SCHEDULER ====================
# Max job retries on error; the default 0 means no retry
kylin.job.retry=0
kylin.job.retry-interval=30000
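# Illustrative only (hypothetical values): retry each failed job twice, waiting 60s
# between attempts; judging by the default of 30000, the interval is in milliseconds.
#kylin.job.retry=2
#kylin.job.retry-interval=60000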
# Max number of concurrently running jobs
kylin.job.max-concurrent-jobs=20
# If true, email notifications will be sent
kylin.job.notification-enabled=false
kylin.job.notification-mail-enable-starttls=true
kylin.job.notification-mail-host=smtp.office365.com
kylin.job.notification-mail-port=587
[email protected]
kylin.job.notification-mail-password=mypassword
[email protected]
kylin.env.max-keep-log-file-number=10
kylin.env.max-keep-log-file-threshold-mb=256
## use a cron schedule to check whether the log files (shell.stderr, shell.stdout and kylin.out) need to be rotated
kylin.env.log-rotate-check-cron=33 * * * *
## whether to enable automatic log rotation
kylin.env.log-rotate-enabled=false
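# Note: unlike the 6-field Spring crons above, the rotate check uses a standard 5-field
# crontab expression (minute hour day-of-month month day-of-week).
# Illustrative only (hypothetical schedule): enable rotation, checking daily at 03:00
#kylin.env.log-rotate-enabled=true
#kylin.env.log-rotate-check-cron=0 3 * * *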
# ==================== QUERY ====================
kylin.query.pushdown-enabled=true
kylin.query.pushdown.runner-class-name=org.apache.kylin.query.pushdown.PushDownRunnerSparkImpl
kylin.query.pushdown.partition-check.runner-class-name=org.apache.kylin.query.pushdown.PushDownRunnerSparkImpl
kylin.query.escape-default-keyword=false
kylin.query.cache-enabled=true
kylin.query.force-limit=-1
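# Illustrative only: a positive force-limit (hypothetical value below) is understood to
# cap unbounded "select *" style queries, while -1 leaves them unlimited.
#kylin.query.force-limit=500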
kylin.query.replace-count-column-with-count-star=false
kylin.query.use-tableindex-answer-non-raw-query=false
kylin.query.query-with-execute-as=true
kylin.query.query-history-download-max-size=100000
kylin.query.query-history-download-batch-size=20000
kylin.query.engine.spark-scheduler-mode=FAIR
kylin.query.realization.chooser.thread-core-num=5
kylin.query.realization.chooser.thread-max-num=50
kylin.query.join-match-optimization-enabled=false
kylin.query.convert-count-distinct-expression-enabled=false
kylin.query.memory-limit-during-collect-mb=5400
kylin.query.cartesian-partition-num-threshold-factor=100
# Spark context canary, used to monitor the Spark context
kylin.canary.sqlcontext-enabled=false
# ==================== ASYNC QUERY ====================
kylin.query.async.result-retain-days=7d
kylin.query.unique-async-query-yarn-queue-enabled=false
#kylin.query.async-query.submit-hadoop-conf-dir=
kylin.query.async-query.spark-conf.spark.yarn.queue=default
kylin.query.async-query.spark-conf.spark.executor.extraJavaOptions=-Dhdp.version=current -Dlog4j.configurationFile=spark-executor-log4j.xml -Dkylin.hdfs.working.dir=${kylin.env.hdfs-working-dir} -Dkap.metadata.identifier=${kylin.metadata.url.identifier} -Dkap.spark.category=sparder -Dkap.spark.project=${job.project} -Dkap.spark.mountDir=${kylin.tool.mount-spark-log-dir} -XX:MaxDirectMemorySize=896M
kylin.query.async-query.spark-conf.spark.yarn.am.extraJavaOptions=-Dhdp.version=current
kylin.query.async-query.spark-conf.spark.driver.extraJavaOptions=-Dhdp.version=current
kylin.query.async-query.spark-conf.spark.port.maxRetries=128
kylin.query.async-query.spark-conf.spark.driver.memory=2048m
kylin.query.async-query.spark-conf.spark.sql.driver.maxCollectSize=3600m
kylin.query.async-query.spark-conf.spark.executor.memory=2048m
kylin.query.async-query.spark-conf.spark.executor.memoryOverhead=1024m
kylin.query.async-query.spark-conf.spark.yarn.am.memory=1024m
kylin.query.async-query.spark-conf.spark.executor.cores=2
kylin.query.async-query.spark-conf.spark.executor.instances=1
kylin.query.async-query.spark-conf.spark.task.maxFailures=1
kylin.query.async-query.spark-conf.spark.ui.port=4041
kylin.query.async-query.spark-conf.spark.locality.wait=0s
kylin.query.async-query.spark-conf.spark.sql.dialect=hiveql
kylin.query.async-query.spark-conf.spark.sql.constraintPropagation.enabled=false
kylin.query.async-query.spark-conf.spark.ui.retainedStages=300
kylin.query.async-query.spark-conf.spark.hadoop.yarn.timeline-service.enabled=false
kylin.query.async-query.spark-conf.spark.hadoop.hive.exec.scratchdir=${kylin.env.hdfs-working-dir}/hive-scratch
kylin.query.async-query.spark-conf.spark.hadoop.mapreduce.input.fileinputformat.input.dir.recursive=true
kylin.query.async-query.spark-conf.hive.execution.engine=MR
kylin.query.async-query.spark-conf.spark.sql.crossJoin.enabled=true
kylin.query.async-query.spark-conf.spark.broadcast.autoClean.enabled=true
kylin.query.async-query.spark-conf.spark.sql.objectHashAggregate.sortBased.fallbackThreshold=1
kylin.query.async-query.spark-conf.spark.sql.hive.caseSensitiveInferenceMode=NEVER_INFER
kylin.query.async-query.spark-conf.spark.sql.sources.bucketing.enabled=false
kylin.query.async-query.spark-conf.spark.yarn.stagingDir=${kylin.env.hdfs-working-dir}
kylin.query.async-query.spark-conf.spark.eventLog.enabled=true
kylin.query.async-query.spark-conf.spark.history.fs.logDirectory=${kylin.env.hdfs-working-dir}/sparder-history
kylin.query.async-query.spark-conf.spark.eventLog.dir=${kylin.env.hdfs-working-dir}/sparder-history
kylin.query.async-query.spark-conf.spark.eventLog.rolling.enabled=true
kylin.query.async-query.spark-conf.spark.eventLog.rolling.maxFileSize=100m
kylin.query.async-query.spark-conf.spark.sql.hive.metastore.version=2.3.9
kylin.query.async-query.spark-conf.spark.sql.hive.metastore.jars=${KYLIN_HOME}/spark/jars/*
kylin.query.async-query.spark-conf.spark.sql.cartesianPartitionNumThreshold=-1
kylin.query.async-query.spark-conf.parquet.filter.columnindex.enabled=false
kylin.query.async-query.spark-conf.spark.master=yarn
kylin.query.async-query.spark-conf.spark.submit.deployMode=client
# ==================== SEGMENT PRUNER ====================
# Enable query-time segment filtering by dimension value ranges
kylin.storage.columnar.dimension-range-filter-enabled=true
# ==================== QUERY SECURITY ====================
# Enable/disable ACL check for cube query
kylin.query.security.acl-tcr-enabled=true
# ==================== SYS SECURITY ====================
# Spring Security profile, options: testing, ldap, saml
# with the "testing" profile, users can log in with a pre-defined name/password such as ADMIN/KYLIN
#kylin.security.profile=testing
# Default admin role in LDAP, used by the ldap and saml profiles
kylin.security.acl.admin-role=ROLE_ADMIN
# LDAP authentication configuration
kylin.security.ldap.connection-server=ldap://ldap_server:389
#kylin.security.ldap.connection-username=
#kylin.security.ldap.connection-password=
# LDAP user account directory
#kylin.security.ldap.user-search-base=
#kylin.security.ldap.user-search-pattern=
#kylin.security.ldap.user-group-search-base=
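# Illustrative only (hypothetical DNs and filter; adjust to your LDAP directory layout):
#kylin.security.ldap.user-search-base=ou=People,dc=example,dc=com
#kylin.security.ldap.user-search-pattern=(&(cn={0}))
#kylin.security.ldap.user-group-search-base=ou=Groups,dc=example,dc=com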
# LDAP service account directory
kylin.security.ldap.service-search-base=${kylin.security.ldap.user-search-base}
kylin.security.ldap.service-search-pattern=${kylin.security.ldap.user-search-pattern}
kylin.security.ldap.service-group-search-base=${kylin.security.ldap.user-group-search-base}
## LDAP filter ##
# used for searching all users
kylin.security.ldap.user-search-filter=(objectClass=person)
# used for searching all group members
kylin.security.ldap.user-group-search-filter=(|(member={0})(memberUid={1}))
# used for searching all groups
kylin.security.ldap.group-search-filter=(|(objectClass=groupOfNames)(objectClass=group))
# used for searching all users in specific group
kylin.security.ldap.group-member-search-filter=(&(cn={0})(objectClass=groupOfNames))
## LDAP attribute ##
# used for getting a user's identifier (e.g. username)
kylin.security.ldap.user-identifier-attr=cn
# used for getting a group's identifier
kylin.security.ldap.group-identifier-attr=cn
# used for identifying a group's members
kylin.security.ldap.group-member-attr=member
## Custom UserService implementation (overrides the default)
#kylin.security.custom.user-service-class-name=
## Custom UserGroupService implementation (overrides the default)
#kylin.security.custom.user-group-service-class-name=
## Custom authentication provider implementation (overrides the default)
#kylin.security.custom.authentication-provider-class-name=
## Custom logout success handler (default implementation)
kylin.security.custom.logout-success-handler=org.springframework.security.web.authentication.logout.SimpleUrlLogoutSuccessHandler
## Row ACL filter limit threshold
kylin.security.acl.row-filter-limit-threshold=100
# ==================== Metrics ====================
kylin.metrics.influx-db=KE_METRICS
kylin.metrics.daily-influx-db=KE_METRICS_DAILY
kylin.metrics.influx-rpc-service-bind-address=127.0.0.1:8088
# ==================== Circuit Breaker ====================
kylin.circuit-breaker.enabled=true
kylin.circuit-breaker.threshold.project=100
kylin.circuit-breaker.threshold.model=100
kylin.circuit-breaker.threshold.query-result-row-count=2000000
# ==================== Kerberos ====================
kylin.kerberos.enabled=false
## Kerberos platform options: Standard, FI
kylin.kerberos.platform=Standard
#kylin.kerberos.principal=
#kylin.kerberos.keytab=
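# Illustrative only (hypothetical principal and keytab path):
#kylin.kerberos.principal=kylin/[email protected]
#kylin.kerberos.keytab=/etc/security/keytabs/kylin.keytab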
kylin.kerberos.cache=kap_kerberos.cache
kylin.kerberos.krb5-conf=krb5.conf
kylin.kerberos.ticket-refresh-interval-minutes=720
kylin.kerberos.zookeeper-server-principal=zookeeper/hadoop
# ==================== Cache =======================
kylin.cache.config=classpath:ehcache.xml
kylin.cache.redis.enabled=false
kylin.cache.redis.cluster-enabled=false
kylin.cache.redis.hosts=localhost:6379
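# Illustrative only (hypothetical hosts; a comma-separated list is assumed for cluster mode):
#kylin.cache.redis.enabled=true
#kylin.cache.redis.cluster-enabled=true
#kylin.cache.redis.hosts=redis-1:6379,redis-2:6379,redis-3:6379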
#kylin.cache.redis.password=
kylin.cache.redis.expire-time-unit=EX
kylin.cache.redis.expire-time=86400
kylin.cache.redis.exception-expire-time=600
kylin.cache.redis.connection-timeout=2000
kylin.cache.redis.so-timeout=2000
kylin.cache.redis.max-attempts=20
kylin.cache.redis.max-total=8
kylin.cache.redis.max-idle=8
kylin.cache.redis.min-idle=0
kylin.cache.redis.batch-count=100000
kylin.cache.redis.max-wait=300000
# ==================== Diagnosis ====================
#kylin.diag.package.timeout-seconds=3600
#kylin.diag.extraction.start-time-days=3
## Allow users to export query results
kylin.web.export-allow-admin=true
kylin.web.export-allow-other=true
## metastore check
kylin.health.metastore-warning-response-ms=300
kylin.health.metastore-error-response-ms=1000
## The unit is seconds, so 3600 equals one hour
kylin.source.load-hive-tablename-interval-seconds=3600
kylin.source.load-hive-tablename-enabled=true
## acl
kylin.security.allow-project-admin-grant-acl=true
## ha
kylin.server.leader-race.enabled=true
kylin.server.leader-race.heart-beat-timeout=60
kylin.server.leader-race.heart-beat-interval=30
## jstack
kylin.task.jstack-dump-enabled=true
kylin.task.jstack-dump-interval-minutes=10
kylin.task.jstack-dump-log-files-max-count=20
kylin.web.session.secure-random-create-enabled=false
kylin.web.session.jdbc-encode-enabled=false
# user password encoding algorithm
kylin.security.user-password-encoder=org.apache.kylin.rest.security.CachedBCryptPasswordEncoder
# model
kylin.model.recommendation-page-size=500
kylin.model.dimension-measure-name.max-length=300
# recommend
kylin.model.suggest-model-sql-limit=200
# KG (Kylin Guardian)
kylin.guardian.enabled=false
# Tool
#kylin.tool.mount-spark-log-dir=
kylin.tool.clean-diag-tmp-file=false
kylin.tool.spark-log-extractor=org.apache.kylin.tool.YarnSparkLogExtractor
kylin.lightning.cluster-id=0
kylin.lightning.server.zookeeper-node=/kylin/management
kylin.favorite.import-sql-max-size=1000
kylin.model.measure-name-check-enabled=true
# ==================== Streaming ====================
kylin.streaming.spark-conf.spark.sql.hive.metastore.version=2.3.9
kylin.streaming.spark-conf.spark.sql.hive.metastore.jars=${KYLIN_HOME}/spark/jars/*
kylin.streaming.spark-conf.spark.driver.extraJavaOptions=-Dfile.encoding=UTF-8 -Dhdp.version=current -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=${KYLIN_HOME}/logs
kylin.streaming.spark-conf.spark.executor.extraJavaOptions=-Dfile.encoding=UTF-8 -Dhdp.version=current -Dkylin.hdfs.working.dir=${kylin.env.hdfs-working-dir} -Dkap.metadata.identifier=${kylin.metadata.url.identifier} -Dkap.spark.category=streaming_job
# custom
kylin.metadata.random-admin-password.enabled=false
kylin.storage.columnar.spark-conf.spark.kubernetes.file.upload.path=${SPARK_UPLOAD_PATH}
kylin.storage.columnar.spark-conf.spark.master=local
kylin.storage.columnar.spark-conf.spark.serializer=org.apache.spark.serializer.KryoSerializer
kylin.storage.columnar.spark-conf.spark.default.parallelism=32
kylin.storage.columnar.spark-conf.spark.sql.shuffle.partitions=32
kylin.storage.columnar.spark-conf.spark.kubernetes.container.image=${SPARK_IMAGE}
kylin.storage.columnar.spark-conf.spark.kubernetes.container.image.pullPolicy=Always
kylin.storage.columnar.spark-conf.spark.kubernetes.namespace=${SPARK_NAMESPACE}
kylin.storage.columnar.spark-conf.spark.kubernetes.authenticate.driver.serviceAccountName=${SPARK_SERVICE_ACCOUNT_NAME}
kylin.storage.columnar.spark-conf.spark.kubernetes.authenticate.executor.serviceAccountName=${SPARK_SERVICE_ACCOUNT_NAME}
kylin.storage.columnar.spark-conf.spark.kubernetes.driver.limit.cores=1
kylin.storage.columnar.spark-conf.spark.kubernetes.executor.limit.cores=1
kylin.storage.columnar.spark-conf.spark.driver.cores=1
#kylin.storage.columnar.spark-conf.spark.driver.memory=2g
#kylin.storage.columnar.spark-conf.spark.driver.extraJavaOptions=-Djava.library.path=$SPARK_HOME/lib/native -XX:MaxDirectMemorySize=1G
#kylin.storage.columnar.spark-conf.spark.executor.cores=1
#kylin.storage.columnar.spark-conf.spark.executor.memory=1g
#kylin.storage.columnar.spark-conf.spark.executor.memoryOverhead=512m
#kylin.storage.columnar.spark-conf.spark.executor.extraJavaOptions=-Djava.library.path=$SPARK_HOME/lib/native -XX:MaxDirectMemorySize=1G
#kylin.storage.columnar.spark-conf.spark.executor.instances=1
kylin.engine.spark.cluster-manager-class-name=org.apache.kylin.cluster.K8sClusterManager
kylin.engine.spark-conf.spark.master=k8s://https://kubernetes.default:443
kylin.engine.spark-conf.spark.kubernetes.container.image=${SPARK_IMAGE}
kylin.engine.spark-conf.spark.kubernetes.container.image.pullPolicy=Always
kylin.engine.spark-conf.spark.kubernetes.file.upload.path=${SPARK_UPLOAD_PATH}
kylin.engine.spark-conf.spark.kubernetes.namespace=${SPARK_NAMESPACE}
kylin.engine.spark-conf.spark.kubernetes.authenticate.driver.serviceAccountName=${SPARK_SERVICE_ACCOUNT_NAME}
kylin.engine.spark-conf.spark.kubernetes.authenticate.executor.serviceAccountName=${SPARK_SERVICE_ACCOUNT_NAME}
kylin.engine.spark-conf.spark.kubernetes.driver.limit.cores=1
kylin.engine.spark-conf.spark.kubernetes.executor.limit.cores=1
kylin.engine.spark-conf.spark.executor.cores=1
kylin.engine.spark-conf.spark.executor.memory=1g
kylin.engine.spark-conf.spark.executor.memoryOverhead=512m
kylin.engine.spark-conf.spark.executor.instances=1
kylin.metadata.distributed-lock-impl=org.apache.kylin.common.lock.jdbc.JdbcDistributedLockFactory
kylin.security.profile=custom
kylin.security.custom.user-service-class-name=io.kyligence.saas.iam.sdk.ke.IAMUserService
kylin.security.custom.user-group-service-class-name=io.kyligence.saas.iam.sdk.ke.IAMUserGroupService
kylin.security.custom.authentication-provider-class-name=io.kyligence.saas.iam.sdk.ke.IAMAuthenticationProvider
kyligence.kyiam.sdk.tenant-id=${TENANT_ID}
spring.session.store-type=none
spring.cloud.nacos.config.server-addr=${NACOS_CONFIG_SERVER_ADDR_NO_HTTP}
spring.cloud.nacos.discovery.service=data-loading
kyligence.kyiam.sdk.security.gateway-path-filter.ignore-url=/kylin/api/epoch/maintenance_mode,/kylin/api/prometheus,/kylin/api/user/update_user,/kylin/api/metastore/cleanup,/kylin/api/metastore/cleanup_storage,/kylin/api/epoch,/kylin/api/config/is_cloud,/api/system/license/file,/kylin/api/system/license/content,/kylin/api/system/license/trial,/api/system/license,/kylin/api/system/diag/progress,/kylin/api/system/roll_event_log,/api/user/authentication*/**,/kylin/api/system/backup,/kylin/api/cubes/src/tables,/kylin/api/admin/public_config,/kylin/api/admin/instance_info,/kylin/api/projects,/kylin/api/system/license/info,/kylin/api/health,/kylin/api/health/**,/api/prometheus,/kylin/api/broadcast/**,/kylin/api/models/model_info,/kylin/api/**/metrics,/kylin/api/cache*/**,/kylin/api/streaming_jobs/spark,/kylin/api/streaming_jobs/stats,/api/streaming_jobs/dataflow/**,/kylin/api/jobs/spark,/kylin/api/jobs/stage/status,/kylin/api/jobs/error,/api/jobs/wait_and_run_time,/kylin/api/system/clean_sparder_event_log
kyligence.kyiam.sdk.security.gateway-path-filter.forbid-url=/kylin/api/jobs/feign/**,/kylin/api/models/feign/**,/kylin/api/tables/feign/**,/kylin/api/job_delegate/**
kylin.engine.spark-conf.spark.kubernetes.executor.podNamePrefix=sparder-data-loading
kylin.task.upload-gc-log-enabled=true
kylin.task.upload-jstack-dump-enabled=true
kylin.log.gc-and-jstack.retention-time=7d