spark-jobserver 0.6.1: application.conf

# Settings for safe local mode development
spark {
  master = "local[4]"
  # spark web UI port
  webUrlPort = 8080

  jobserver {
    port = 8090
    bind-address = "0.0.0.0"
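
    # With the defaults above, the REST API listens on all interfaces at http://<host>:8090.
    # For example (routes as documented in the spark-jobserver README; the jar path and the
    # app/class names below are placeholders):
    #   curl --data-binary @path/to/my-job.jar localhost:8090/jars/test
    #   curl -d "" 'localhost:8090/jobs?appName=test&classPath=spark.jobserver.WordCountExample'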

    # Number of job results to keep per JobResultActor/context
    job-result-cache-size = 5000

    jobdao = spark.jobserver.io.JobFileDAO
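
    # The filesystem-backed JobFileDAO above stores jar and job metadata under "filedao" below.
    # To keep that metadata in the SQL database configured under "sqldao" instead, the SQL-backed
    # DAO can be used -- a sketch, assuming the JobSqlDAO class shipped with this release:
    # jobdao = spark.jobserver.io.JobSqlDAO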

    # Automatically load a set of jars at startup time.  Key is the appName, value is the path/URL to the jar.
    # job-jar-paths {
    #   test = ../job-server-tests/target/scala-2.10/job-server-tests_2.10-0.5.3-SNAPSHOT.jar
    # }

    filedao {
      rootdir = /tmp/spark-jobserver/filedao/data
    }

    datadao {
      # Storage directory for files that are uploaded to the server
      # via the POST /data routes
      rootdir = /tmp/spark-jobserver/upload
    }
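
    # Uploaded files land under rootdir above.  A hedged example of the upload route (route name
    # as documented for the job server's /data API; the file name is a placeholder):
    #   curl --data-binary @path/to/input.txt localhost:8090/data/input.txt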

    sqldao {
      # Slick database driver, fully-qualified class name
      slick-driver = scala.slick.driver.H2Driver

      # JDBC driver, fully-qualified class name
      jdbc-driver = org.h2.Driver

      # Directory where default H2 driver stores its data. Only needed for H2.
      rootdir = /tmp/spark-jobserver/sqldao/data

      # Full JDBC URL / init string, plus username and password.  This must match the drivers above.
      # Substitutions may be used when launching the job server, but leave them out of this default file or the tests won't pass.
      jdbc {
        url = "jdbc:h2:file:/tmp/spark-jobserver/sqldao/data/h2-db"
        user = ""
        password = ""
      }

      # DB connection pool settings
      dbcp {
        maxactive = 20
        maxidle = 10
        initialsize = 10
      }
    }
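
    # The sqldao settings above default to an embedded H2 database.  Pointing the DAO at an
    # external database should only need the drivers and JDBC URL swapped -- a sketch for MySQL
    # (assumes the MySQL connector jar is on the classpath; host, database and credentials are
    # placeholders):
    # sqldao {
    #   slick-driver = scala.slick.driver.MySQLDriver
    #   jdbc-driver = com.mysql.jdbc.Driver
    #   jdbc {
    #     url = "jdbc:mysql://db-host:3306/spark_jobserver"
    #     user = "jobserver"
    #     password = "secret"
    #   }
    # }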

    # The Akka ask-pattern timeout for the REST API
    short-timeout = 3 s

    # Timeout for the job server to wait while creating contexts
    context-creation-timeout = 15 s

    # Number of jobs that can be run simultaneously per context.
    # If not set, defaults to the number of cores on the machine where the job server is running.
    max-jobs-per-context = 8

    # In YARN deployments, timeout for the job server to wait while creating contexts
    yarn-context-creation-timeout = 40 s

    # Spark broadcast factory to use in YARN deployments.
    # In versions prior to 1.1.0, Spark's default broadcast factory is org.apache.spark.broadcast.HttpBroadcastFactory,
    # which cannot be used to start multiple SparkContexts in the same JVM.
    yarn-broadcast-factory = org.apache.spark.broadcast.TorrentBroadcastFactory
  }

  # Predefined Spark contexts
  # Below is an example, but do not uncomment it: everything defined here is carried over to
  # deploy-time configs, so the contexts would be created in every environment.
  contexts {
    # abc-demo {
    #   num-cpu-cores = 4            # Number of cores to allocate.  Required.
    #   memory-per-node = 1024m      # Executor memory per node, -Xmx style, e.g. 512m, 1G, etc.
    # }
    # define additional contexts here
  }

  # Default settings for ad hoc as well as manually created contexts
  # You can add any Spark config params here, for example, spark.mesos.coarse = true
  context-settings {
    num-cpu-cores = 4           # Number of cores to allocate.  Required.
    memory-per-node = 512m      # Executor memory per node, -Xmx style, e.g. 512m, 1G, etc.

    # A zero-arg class implementing spark.jobserver.context.SparkContextFactory
    # Determines the type of jobs that can run in a SparkContext
    context-factory = spark.jobserver.context.DefaultSparkContextFactory
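
    # Other factories can be configured here to run different job types -- a sketch, assuming the
    # factory classes below are available (they are typically shipped in the job-server-extras
    # module; verify the class names against your build):
    # context-factory = spark.jobserver.context.SQLContextFactory        # SQLContext jobs
    # context-factory = spark.jobserver.context.HiveContextFactory       # HiveContext jobs
    # context-factory = spark.jobserver.context.StreamingContextFactory  # Spark Streaming jobs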

    streaming {
      # Default batch interval for Spark Streaming contexts in milliseconds
      batch_interval = 1000

      # if true, stops gracefully by waiting for the processing of all received data to be completed
      stopGracefully = true

      # if true, stops the SparkContext with the StreamingContext. The underlying SparkContext will be
      # stopped regardless of whether the StreamingContext has been started.
      stopSparkContext = true
    }

    # URIs of jars to be loaded into the classpath for this context. This can be a list of strings or a single comma-separated string.
    # dependent-jar-uris = ["file:///some/path/present/in/each/mesos/slave/somepackage.jar"]

    passthrough {
      spark.driver.allowMultipleContexts = true  # Ignore the multiple-contexts exception related to SPARK-2243
    }
  }
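
  # The context-settings defaults above can also be overridden per context at creation time by
  # passing them as query parameters to the REST API, e.g. (values are only an illustration):
  #   curl -d "" 'localhost:8090/contexts/test-context?num-cpu-cores=2&memory-per-node=512m'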
}

akka {
  # Use SLF4J/logback for deployed environment logging
  loggers = ["akka.event.slf4j.Slf4jLogger"]
}

# check the reference.conf in spray-can/src/main/resources for all defined settings
spray.can.server {
  # Uncomment the next lines to serve the job server over HTTPS
  # ssl-encryption = on
  # Path to the keystore
  # keystore = "/some/path/sjs.jks"
  # keystorePW = "changeit"

  # see http://docs.oracle.com/javase/7/docs/technotes/guides/security/StandardNames.html#SSLContext for more examples
  # Typical values are SSL or TLS
  encryptionType = "SSL"
  keystoreType = "JKS"
  # key manager factory provider
  provider = "SunX509"
  # ssl engine provider protocols
  enabledProtocols = ["SSLv3", "TLSv1"]
  idle-timeout = 60 s
  request-timeout = 40 s
  pipelining-limit = 2 # for maximum performance (prevents StopReading / ResumeReading messages to the IOBridge)
  # Needed for HTTP/1.0 requests with missing Host headers
  default-host-header = "spray.io:8765"

  # Increase this in order to upload bigger job jars
  parsing.max-content-length = 30m
}

shiro {
  # Activate authentication (and authorization)
  authentication = off
  # Absolute path to the shiro config file, including the file name
  config.path = "/some/path/shiro.ini"
  # Note that ssl-encryption should also be on when authentication is on, so that passwords
  # cannot be sniffed off the wire
  # Timeout for the job server to wait for authentication requests
  authentication-timeout = 10 s
}
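
# The shiro config.path above points at a standard Apache Shiro INI file.  A minimal sketch with
# a static user list (generic Shiro INI syntax, not specific to this project):
#   [users]
#   someuser = somepassword, someRole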



