###################
# Task framework  #
###################

hosts.port = 28888

# host image if any
# hosts.image = ?

# number of available CPUs on the current machine
hosts.numCPU = 1

# list of gpus
hosts.gpus = []

# list of GPUs, as integers separated by commas,
# for specification on the command line;
# will be concatenated with hosts.gpus
hosts.gpusAsCommaString = ""
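
# Example (hypothetical invocation): if the application loads its
# configuration via Typesafe Config's ConfigFactory.load(), JVM system
# properties override this file, so the GPU list can be given as:
#   java -Dhosts.gpusAsCommaString=0,1 -jar app.jar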

# amount of available RAM on the current machine, in MB
hosts.RAM = 96000

# available scratch space on the current machine, in MB
hosts.scratch = 1000000

# number of CPUs reserved for purposes other than Tasks
hosts.reservedCPU = 0

# hostname of the current machine.
hosts.hostname = "localhost"

hosts.app = true

# !Important!
# Root URI of the file storage: a local path or a full URI.
# An s3 scheme also turns on the s3 client (see tasks.fileservice.remote.s3).
tasks.fileservice.storageURI = "./"
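
# Examples (illustrative values; the bucket name is hypothetical):
# tasks.fileservice.storageURI = "/data/tasks-storage"
# tasks.fileservice.storageURI = "s3://my-bucket/prefix"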

# if true, the local storage will be proxied to remote nodes
# data transfer is done via akka remoting
tasks.fileservice.proxyStorage = false

# Turns on the http client 
tasks.fileservice.remote.http = false 

# Turns on the s3 client. It is also turned on if the storageURI has an s3 scheme.
tasks.fileservice.remote.s3 = false

# NOENGINE, or the FQCN of a suitable implementation,
# which is an object extending ElasticSupport[A,B]
# e.g.:
# tasks.elastic.ec2.EC2ElasticSupport
# tasks.elastic.ssh.SSHElasticSupport
# tasks.elastic.sh.SHElasticSupport
tasks.elastic.engine = "NOENGINE"
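
# Example override, using one of the implementations listed above:
# tasks.elastic.engine = "tasks.elastic.ssh.SSHElasticSupport"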

# Cache results of completed tasks?
tasks.cache.enabled = true

# How to locate cache files.
# Possible values are:
# - "prefix", in which case cache meta files follow
#   the same prefix that is used to create regular files
# - a relative path separated by '/', in which case all cache meta files are
#   placed in that folder, relative to the storage root.
tasks.cache.sharefilecache.path = prefix 
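
# Example override (hypothetical folder name, relative to the storage root):
# tasks.cache.sharefilecache.path = "meta/cache"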

# Save input of tasks in the cache?
tasks.cache.saveTaskDescription = false

# Timeout after which to give up waiting to persist results into the cache
tasks.cache.timeout = 10 minutes

# Parallelism of the check whether a file handle in the cache is accessible
# This many file handles are concurrently checked
tasks.cache.accessibility-check-parallelism = 32

# The cache checks whether a file is still accessible; if not, it is treated as a miss.
tasks.verifySharedFileInCache = true

# !Important!
# Resubmit failed tasks for execution.
# May lead to an infinite loop if the failure is systematic.
tasks.resubmitFailedTask = false

# Ping interval between worker and queue.
tasks.askInterval = 100 ms

tasks.disableRemoting = false

tasks.skipContentHashVerificationAfterCache = false
tasks.skipContentHashCreationUponImport = false

tasks.s3.regionProfileName = "default"

tasks.s3.serverSideEncryption = "AES256"

tasks.s3.cannedAcls = []

tasks.s3.grantFullControl = []

tasks.s3.uploadParallelism = 4

# Kill a node if it has been idle for this duration.
tasks.elastic.idleNodeTimeout = 900 s

# Max number of nodes running at any given time
tasks.elastic.maxNodes = 1

# Max nodes to spawn, cumulative from the start of the application
tasks.elastic.maxNodesCumulative = 10

# Max pending nodes. After this is reached, no new node request is submitted.
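# (${tasks.elastic.maxNodes} below is a HOCON substitution: it resolves to
# the value of tasks.elastic.maxNodes when the configuration is loaded.)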
tasks.elastic.maxPending = ${tasks.elastic.maxNodes}

# Time to wait for a pending node to come online. 
# The request is removed after this time if the node is still not online.
tasks.elastic.pendingNodeTimeout = 120 s

# The elastic subsystem checks the task queue this often.
tasks.elastic.queueCheckInterval = 60 s

tasks.elastic.queueCheckInitialDelay = 5 s

tasks.elastic.nodeKillerMonitorInterval = 5 s

tasks.elastic.logQueueStatus = true

# jvmMaxHeapFactor * (requested memory of the new node) is assigned to -Xmx
tasks.elastic.jvmMaxHeapFactor = 0.85

# This is an object in which every child object represents a node.
# The child objects have the following fields:
# hostname, keyFile, username, memory, cpu, extraArgs, scratch.
# keyFile points to an unencrypted, passphrase-less OpenSSH key file.
tasks.elastic.ssh.hosts = {}
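
# Example (hypothetical host; all values are illustrative):
# tasks.elastic.ssh.hosts = {
#   node1 = {
#     hostname = "192.168.1.10"
#     username = "worker"
#     keyFile = "/home/worker/.ssh/id_rsa"
#     cpu = 8
#     memory = 16000
#     scratch = 100000
#     extraArgs = ""
#   }
# }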

tasks.fileSendChunkSize = 16 MiB

tasks.elastic.javaCommandLine = ""

tasks.elastic.checktempfolder = false

tasks.elastic.workerWorkingDirectory = """./"""

tasks.elastic.workerPackageName = "package"

# Settings for the Amazon AWS interface.
# These mostly mirror the LSF settings, adapted to AWS.
tasks.elastic.aws {

      terminateMaster = false

      # aws region, e.g. us-east-1
      # leave empty to use the default region chain
      region = ""

      spotPrice = 0.271

      # instance store 64 bit: ami-570f603e
      # for cluster compute nodes: ami-a73758ce
      ami = "ami-a73758ce"


      securityGroup = ""

      securityGroups = []

      subnetId = ""

      extraFilesFromS3 = []

      extraStartupScript = ""

      jvmMaxHeapFactor = 0.5

      keyName = ""

      iamRole = ""

      placementGroup = ""

      tags = []

      instances = [

            {
                  name = m4.large
                  cpu = 2 
                  ram = 8000 
                  gpu = 0
            },
            {
                  name = m4.xlarge
                  cpu = 4 
                  ram = 16000 
                  gpu = 0
            },
            {
                  name = m4.2xlarge
                  cpu = 8 
                  ram = 32000 
                  gpu = 0
            },
            {
                  name = m4.4xlarge
                  cpu = 16 
                  ram = 64000 
                  gpu = 0
            },
            {
                  name = m4.16xlarge
                  cpu = 64 
                  ram = 256000 
                  gpu = 0
            },
       
            {
                  name = c6i.large
                  cpu = 2 
                  ram = 4000 
                  gpu = 0
            },
            {
                  name = c6i.xlarge
                  cpu = 4 
                  ram = 8000 
                  gpu = 0
            },
            {
                  name = c6i.2xlarge
                  cpu = 8 
                  ram = 16000 
                  gpu = 0
            },
            {
                  name = c6i.4xlarge
                  cpu = 16
                  ram = 32000
                  gpu = 0
            },
            {
                  name = c6i.8xlarge
                  cpu = 32
                  ram = 64000
                  gpu = 0
            },
            {
                  name = c6i.16xlarge
                  cpu = 64
                  ram = 128000
                  gpu = 0
            },
            {
                  name = c6i.32xlarge
                  cpu = 128
                  ram = 256000
                  gpu = 0
            },

            {
                  name = r6i.large
                  cpu = 2 
                  ram = 16000 
                  gpu = 0
            },
            {
                  name = r6i.xlarge
                  cpu = 4
                  ram = 32000 
                  gpu = 0
            },
            {
                  name = r6i.2xlarge
                  cpu = 8
                  ram = 64000
                  gpu = 0
            },
            {
                  name = r6i.4xlarge
                  cpu = 16
                  ram = 128000
                  gpu = 0
            },
            {
                  name = r6i.8xlarge
                  cpu = 32
                  ram = 256000
                  gpu = 0
            },
            {
                  name = p2.xlarge
                  cpu = 4
                  ram = 61000 
                  gpu = 1
            },
            {
                  name = p2.8xlarge
                  cpu = 32
                  ram = 488000 
                  gpu = 8
            },
            {
                  name = p3.2xlarge
                  cpu = 8
                  ram = 61000 
                  gpu = 1
            },
            {
                  name = p3.8xlarge
                  cpu = 32
                  ram = 244000 
                  gpu = 4
            }
      ]

}
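
# Example minimal EC2 setup (hypothetical values, combining the keys above):
# tasks.elastic.engine = "tasks.elastic.ec2.EC2ElasticSupport"
# tasks.elastic.aws.region = "us-east-1"
# tasks.elastic.aws.keyName = "my-keypair"
# tasks.elastic.aws.securityGroup = "sg-0123456789abcdef0"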

# Akka DeadlineFailureDetector
tasks.failuredetector {

      heartbeat-interval = 1000 ms

      # if heartbeats pause for this duration, the detector raises an alarm
      acceptable-heartbeat-pause = 600 s
}

tasks.codeVersion = undefined

tasks.akka.actorsystem.name = tasks

tasks.addShutdownHook = true

tasks.ui.fqcn = ""

tasks.ui.queue.host = "localhost"

tasks.ui.queue.port = "28880"

tasks.ui.app.host = "localhost"

tasks.ui.app.port = "28881"

tasks.kubernetes.image = ""

tasks.kubernetes.namespace = "default"

# a flat list of groups of 5 values
# each group is: effect, key, operator, seconds, value
# applied to pods with a gpu limit > 0
tasks.kubernetes.tolerations = []
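
# Example (a sketch assuming a flat list in which every 5 consecutive
# elements form one group; the key and values are hypothetical):
# tasks.kubernetes.tolerations = [
#   "NoSchedule", "nvidia.com/gpu", "Exists", "0", ""
# ]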

# raw json string of podspec template
# tasks.kubernetes.podSpec = ""

tasks.kubernetes.image-pull-policy = "IfNotPresent"

# e.g. in container spec:
# - name: TASKS_K8S_MY_POD_IP
#   valueFrom:
#     fieldRef:
#       fieldPath: status.podIP
tasks.kubernetes.hostnameOrIPEnvVar = "TASKS_K8S_MY_POD_IP"

# - name: TASKS_K8S_MY_CPU_LIMIT
#   valueFrom:
#     resourceFieldRef:
#       containerName: test-container
#       resource: limits.cpu
tasks.kubernetes.cpuLimitEnvVar = "TASKS_K8S_MY_CPU_LIMIT"

# - name: TASKS_K8S_MY_RAM_LIMIT
#   valueFrom:
#     resourceFieldRef:
#       containerName: test-container
#       resource: limits.memory
tasks.kubernetes.ramLimitEnvVar = "TASKS_K8S_MY_RAM_LIMIT"

# - name: TASKS_K8S_MY_SCRATCH_LIMIT
#   valueFrom:
#     resourceFieldRef:
#       containerName: test-container
#       resource: limits.ephemeral-storage
tasks.kubernetes.scratchLimitEnvVar = "TASKS_K8S_MY_SCRATCH_LIMIT"

# these resources will be added to the kubernetes resource request
tasks.kubernetes.extralimits.cpu = 0
tasks.kubernetes.extralimits.ram = 500

tasks.kubernetes.nodeSelector = []

# always ask for at least minimumlimits + extralimits
tasks.kubernetes.minimumlimits.cpu = 1
tasks.kubernetes.minimumlimits.ram = 500
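
# Example (illustrative arithmetic, reading the above as
# final = max(task request, minimumlimits) + extralimits):
# a task asking for cpu = 2 and ram = 300 yields
# cpu = max(2, 1) + 0 = 2 and ram = max(300, 500) + 500 = 1000.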

tasks.worker-main-class = ""

tasks.createFilePrefixForTaskId = true

tasks.fileservice.allowDeletion = false

tasks.fileservice.allowOverwrite = false

tasks.fileservice.folderFileStorageCompleteFileCheck = true

tasks.maxConfigLoadInterval = 10 seconds

# 'default' or 'none' or an FQCN
tasks.tracker.fqcn = "none"
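
# Example: enable the built-in tracker (one of the values listed above):
# tasks.tracker.fqcn = "default"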

tasks.tracker.logFile = "resourceUsage.log.json"

tasks.queue.trackDataFlow = false

tasks.queue.track-data-flow-history-file-read-parallelism = 32

tasks.fileservice.writeFileHistories = false

tasks.elastic.sh.workdir = "."

# force worker nodes to connect to a proxy file storage
tasks.fileservice.connectToProxy = false

######################
# Task subsystem end #
######################



