package googleapis.bigquery
import io.circe._
import io.circe.syntax._
final case class SparkStatistics(
  /** Output only. Endpoints returned from Dataproc. Key list: - history_server_endpoint: A link to Spark job UI.
    */
  endpoints: Option[Map[String, String]] = None,
  /** Output only. Spark job ID if a Spark job is created successfully.
    */
  sparkJobId: Option[String] = None,
  /** Output only. Location where the Spark job is executed. A location is selected by BigQuery for jobs configured to run in a multi-region.
    */
  sparkJobLocation: Option[String] = None,
  /** Output only. The Cloud KMS encryption key that is used to protect the resources created by the Spark job. If the Spark procedure uses the invoker security mode, the Cloud KMS encryption key is either inferred from the provided system variable, `@@spark_proc_properties.kms_key_name`, or the default key of the BigQuery job's project (if the CMEK organization policy is enforced). Otherwise, the Cloud KMS key is either inferred from the Spark connection associated with the procedure (if it is provided), or from the default key of the Spark connection's project if the CMEK organization policy is enforced. Example:
    * `projects/[kms_project_id]/locations/[region]/keyRings/[key_region]/cryptoKeys/[key]`
    */
  kmsKeyName: Option[String] = None,
  /** Output only. The Google Cloud Storage bucket that is used as the default file system by the Spark application. This field is only filled when the Spark procedure uses the invoker security mode. The `gcsStagingBucket` bucket is inferred from the `@@spark_proc_properties.staging_bucket` system variable (if it is provided). Otherwise, BigQuery creates a default staging bucket for the job and returns the bucket name in this field. Example:
    * `gs://[bucket_name]`
    */
  gcsStagingBucket: Option[String] = None,
  /** Output only. Logging info is used to generate a link to Cloud Logging.
    */
  loggingInfo: Option[SparkLoggingInfo] = None,
)
object SparkStatistics {
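  // Encodes all six fields unconditionally: fields set to None are emitted as
  // JSON null under their key rather than being omitted from the object.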
  implicit val encoder: Encoder[SparkStatistics] = Encoder.instance { x =>
    Json.obj(
      "endpoints" := x.endpoints,
      "sparkJobId" := x.sparkJobId,
      "sparkJobLocation" := x.sparkJobLocation,
      "kmsKeyName" := x.kmsKeyName,
      "gcsStagingBucket" := x.gcsStagingBucket,
      "loggingInfo" := x.loggingInfo,
    )
  }
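  // Decodes leniently: a key that is absent or set to null yields None rather
  // than a decoding failure.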
  implicit val decoder: Decoder[SparkStatistics] = Decoder.instance { c =>
    for {
      v0 <- c.get[Option[Map[String, String]]]("endpoints")
      v1 <- c.get[Option[String]]("sparkJobId")
      v2 <- c.get[Option[String]]("sparkJobLocation")
      v3 <- c.get[Option[String]]("kmsKeyName")
      v4 <- c.get[Option[String]]("gcsStagingBucket")
      v5 <- c.get[Option[SparkLoggingInfo]]("loggingInfo")
    } yield SparkStatistics(v0, v1, v2, v3, v4, v5)
  }
}
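
// ---------------------------------------------------------------------------
// Usage sketch (illustrative, not part of the generated source). Assumes the
// circe-parser module is on the classpath; the object name and the payload
// below are made-up examples of what the BigQuery API might return for these
// fields.
// ---------------------------------------------------------------------------
object SparkStatisticsExample {
  import io.circe.parser.decode

  def main(args: Array[String]): Unit = {
    // Keys not present in the payload (e.g. kmsKeyName) decode to None.
    val payload =
      """{"sparkJobId":"job_abc123","sparkJobLocation":"us-central1"}"""

    decode[SparkStatistics](payload) match {
      // Re-encode with the implicit Encoder above; None fields appear as null.
      case Right(stats) => println(stats.asJson.spaces2)
      case Left(err)    => println(s"Failed to decode SparkStatistics: $err")
    }
  }
}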