package googleapis.bigquery

import io.circe._
import io.circe.syntax._

final case class SparkOptions(
  /** Archive files to be extracted into the working directory of each executor. For more
    * information about Apache Spark, see [Apache Spark](https://spark.apache.org/docs/latest/index.html).
    */
  archiveUris: Option[List[String]] = None,
  /** The fully qualified name of a class in jar_uris, for example, com.example.wordcount.
    * Exactly one of the main_class and main_jar_uri fields should be set for the Java/Scala
    * language type.
    */
  mainClass: Option[String] = None,
  /** Runtime version. If not specified, the default runtime version is used. */
  runtimeVersion: Option[String] = None,
  /** Files to be placed in the working directory of each executor. For more information about
    * Apache Spark, see [Apache Spark](https://spark.apache.org/docs/latest/index.html).
    */
  fileUris: Option[List[String]] = None,
  /** The main file/jar URI of the Spark application. Exactly one of the definition_body and
    * main_file_uri fields must be set for Python. Exactly one of the main_class and
    * main_file_uri fields should be set for the Java/Scala language type.
    */
  mainFileUri: Option[String] = None,
  /** Python files to be placed on the PYTHONPATH for a PySpark application. Supported file
    * types: `.py`, `.egg`, and `.zip`. For more information about Apache Spark, see
    * [Apache Spark](https://spark.apache.org/docs/latest/index.html).
    */
  pyFileUris: Option[List[String]] = None,
  /** Custom container image for the runtime environment. */
  containerImage: Option[String] = None,
  /** Configuration properties as a set of key/value pairs, which will be passed on to the
    * Spark application. For more information, see
    * [Apache Spark](https://spark.apache.org/docs/latest/index.html) and the
    * [procedure option list](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#procedure_option_list).
    */
  properties: Option[Map[String, String]] = None,
  /** JARs to include on the driver and executor CLASSPATH. For more information about Apache
    * Spark, see [Apache Spark](https://spark.apache.org/docs/latest/index.html).
    */
  jarUris: Option[List[String]] = None,
  /** Fully qualified name of the user-provided Spark connection object. Format:
    * `"projects/{project_id}/locations/{location_id}/connections/{connection_id}"`
    */
  connection: Option[String] = None,
)
object SparkOptions {

  // Encodes every field; unset (None) values are serialized as JSON nulls.
  implicit val encoder: Encoder[SparkOptions] = Encoder.instance { x =>
    Json.obj(
      "archiveUris" := x.archiveUris,
      "mainClass" := x.mainClass,
      "runtimeVersion" := x.runtimeVersion,
      "fileUris" := x.fileUris,
      "mainFileUri" := x.mainFileUri,
      "pyFileUris" := x.pyFileUris,
      "containerImage" := x.containerImage,
      "properties" := x.properties,
      "jarUris" := x.jarUris,
      "connection" := x.connection,
    )
  }

  // Decodes each field by key; missing or null keys decode to None.
  implicit val decoder: Decoder[SparkOptions] = Decoder.instance { c =>
    for {
      v0 <- c.get[Option[List[String]]]("archiveUris")
      v1 <- c.get[Option[String]]("mainClass")
      v2 <- c.get[Option[String]]("runtimeVersion")
      v3 <- c.get[Option[List[String]]]("fileUris")
      v4 <- c.get[Option[String]]("mainFileUri")
      v5 <- c.get[Option[List[String]]]("pyFileUris")
      v6 <- c.get[Option[String]]("containerImage")
      v7 <- c.get[Option[Map[String, String]]]("properties")
      v8 <- c.get[Option[List[String]]]("jarUris")
      v9 <- c.get[Option[String]]("connection")
    } yield SparkOptions(v0, v1, v2, v3, v4, v5, v6, v7, v8, v9)
  }
}
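
// A minimal usage sketch, not part of the generated model: it round-trips a
// SparkOptions value through JSON using the implicits above. Assumes the
// circe-parser module is on the classpath; the bucket, project, and
// connection names below are illustrative placeholders.
object SparkOptionsExample {
  import io.circe.parser.decode

  val opts: SparkOptions = SparkOptions(
    mainFileUri = Some("gs://example-bucket/jobs/wordcount.py"),
    pyFileUris = Some(List("gs://example-bucket/jobs/helpers.zip")),
    connection = Some("projects/example-project/locations/us/connections/example-conn"),
  )

  // Serialize: unset Option fields appear as JSON nulls in the output.
  val json: Json = opts.asJson

  // Parse back; returns Left(error) if the payload does not fit the schema.
  val roundTripped: Either[Error, SparkOptions] = decode[SparkOptions](json.noSpaces)
}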