// com.google.api.services.bigquery.model.SparkStatistics
/*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
/*
* This code was generated by https://github.com/googleapis/google-api-java-client-services/
* Modify at your own risk.
*/
package com.google.api.services.bigquery.model;
/**
* Statistics for a BigSpark query. Populated as part of JobStatistics2
*
* This is the Java data model class that specifies how to parse/serialize into the JSON that is
* transmitted over HTTP when working with the BigQuery API. For a detailed explanation see:
* https://developers.google.com/api-client-library/java/google-http-java-client/json
*
*
* @author Google, Inc.
*/
@SuppressWarnings("javadoc")
@SuppressWarnings("javadoc")
public final class SparkStatistics extends com.google.api.client.json.GenericJson {

  /**
   * Output only. Endpoints returned from Dataproc. Key list: - history_server_endpoint: A link to
   * Spark job UI.
   * The value may be {@code null}.
   */
  @com.google.api.client.util.Key
  private java.util.Map<String, java.lang.String> endpoints;

  /**
   * Output only. The Google Cloud Storage bucket that is used as the default file system by the
   * Spark application. This field is only filled when the Spark procedure uses the invoker security
   * mode. The `gcsStagingBucket` bucket is inferred from the
   * `@@spark_proc_properties.staging_bucket` system variable (if it is provided). Otherwise,
   * BigQuery creates a default staging bucket for the job and returns the bucket name in this
   * field. Example: * `gs://[bucket_name]`
   * The value may be {@code null}.
   */
  @com.google.api.client.util.Key
  private java.lang.String gcsStagingBucket;

  /**
   * Output only. The Cloud KMS encryption key that is used to protect the resources created by the
   * Spark job. If the Spark procedure uses the invoker security mode, the Cloud KMS encryption key
   * is either inferred from the provided system variable, `@@spark_proc_properties.kms_key_name`,
   * or the default key of the BigQuery job's project (if the CMEK organization policy is enforced).
   * Otherwise, the Cloud KMS key is either inferred from the Spark connection associated with the
   * procedure (if it is provided), or from the default key of the Spark connection's project if the
   * CMEK organization policy is enforced. Example: *
   * `projects/[kms_project_id]/locations/[region]/keyRings/[key_region]/cryptoKeys/[key]`
   * The value may be {@code null}.
   */
  @com.google.api.client.util.Key
  private java.lang.String kmsKeyName;

  /**
   * Output only. Logging info is used to generate a link to Cloud Logging.
   * The value may be {@code null}.
   */
  @com.google.api.client.util.Key
  private SparkLoggingInfo loggingInfo;

  /**
   * Output only. Spark job ID if a Spark job is created successfully.
   * The value may be {@code null}.
   */
  @com.google.api.client.util.Key
  private java.lang.String sparkJobId;

  /**
   * Output only. Location where the Spark job is executed. A location is selected by BigQuery for
   * jobs configured to run in a multi-region.
   * The value may be {@code null}.
   */
  @com.google.api.client.util.Key
  private java.lang.String sparkJobLocation;

  /**
   * Output only. Endpoints returned from Dataproc. Key list: - history_server_endpoint: A link to
   * Spark job UI.
   * @return value or {@code null} for none
   */
  public java.util.Map<String, java.lang.String> getEndpoints() {
    return endpoints;
  }

  /**
   * Output only. Endpoints returned from Dataproc. Key list: - history_server_endpoint: A link to
   * Spark job UI.
   * @param endpoints endpoints or {@code null} for none
   */
  public SparkStatistics setEndpoints(java.util.Map<String, java.lang.String> endpoints) {
    this.endpoints = endpoints;
    return this;
  }

  /**
   * Output only. The Google Cloud Storage bucket that is used as the default file system by the
   * Spark application. This field is only filled when the Spark procedure uses the invoker security
   * mode. The `gcsStagingBucket` bucket is inferred from the
   * `@@spark_proc_properties.staging_bucket` system variable (if it is provided). Otherwise,
   * BigQuery creates a default staging bucket for the job and returns the bucket name in this
   * field. Example: * `gs://[bucket_name]`
   * @return value or {@code null} for none
   */
  public java.lang.String getGcsStagingBucket() {
    return gcsStagingBucket;
  }

  /**
   * Output only. The Google Cloud Storage bucket that is used as the default file system by the
   * Spark application. This field is only filled when the Spark procedure uses the invoker security
   * mode. The `gcsStagingBucket` bucket is inferred from the
   * `@@spark_proc_properties.staging_bucket` system variable (if it is provided). Otherwise,
   * BigQuery creates a default staging bucket for the job and returns the bucket name in this
   * field. Example: * `gs://[bucket_name]`
   * @param gcsStagingBucket gcsStagingBucket or {@code null} for none
   */
  public SparkStatistics setGcsStagingBucket(java.lang.String gcsStagingBucket) {
    this.gcsStagingBucket = gcsStagingBucket;
    return this;
  }

  /**
   * Output only. The Cloud KMS encryption key that is used to protect the resources created by the
   * Spark job. If the Spark procedure uses the invoker security mode, the Cloud KMS encryption key
   * is either inferred from the provided system variable, `@@spark_proc_properties.kms_key_name`,
   * or the default key of the BigQuery job's project (if the CMEK organization policy is enforced).
   * Otherwise, the Cloud KMS key is either inferred from the Spark connection associated with the
   * procedure (if it is provided), or from the default key of the Spark connection's project if the
   * CMEK organization policy is enforced. Example: *
   * `projects/[kms_project_id]/locations/[region]/keyRings/[key_region]/cryptoKeys/[key]`
   * @return value or {@code null} for none
   */
  public java.lang.String getKmsKeyName() {
    return kmsKeyName;
  }

  /**
   * Output only. The Cloud KMS encryption key that is used to protect the resources created by the
   * Spark job. If the Spark procedure uses the invoker security mode, the Cloud KMS encryption key
   * is either inferred from the provided system variable, `@@spark_proc_properties.kms_key_name`,
   * or the default key of the BigQuery job's project (if the CMEK organization policy is enforced).
   * Otherwise, the Cloud KMS key is either inferred from the Spark connection associated with the
   * procedure (if it is provided), or from the default key of the Spark connection's project if the
   * CMEK organization policy is enforced. Example: *
   * `projects/[kms_project_id]/locations/[region]/keyRings/[key_region]/cryptoKeys/[key]`
   * @param kmsKeyName kmsKeyName or {@code null} for none
   */
  public SparkStatistics setKmsKeyName(java.lang.String kmsKeyName) {
    this.kmsKeyName = kmsKeyName;
    return this;
  }

  /**
   * Output only. Logging info is used to generate a link to Cloud Logging.
   * @return value or {@code null} for none
   */
  public SparkLoggingInfo getLoggingInfo() {
    return loggingInfo;
  }

  /**
   * Output only. Logging info is used to generate a link to Cloud Logging.
   * @param loggingInfo loggingInfo or {@code null} for none
   */
  public SparkStatistics setLoggingInfo(SparkLoggingInfo loggingInfo) {
    this.loggingInfo = loggingInfo;
    return this;
  }

  /**
   * Output only. Spark job ID if a Spark job is created successfully.
   * @return value or {@code null} for none
   */
  public java.lang.String getSparkJobId() {
    return sparkJobId;
  }

  /**
   * Output only. Spark job ID if a Spark job is created successfully.
   * @param sparkJobId sparkJobId or {@code null} for none
   */
  public SparkStatistics setSparkJobId(java.lang.String sparkJobId) {
    this.sparkJobId = sparkJobId;
    return this;
  }

  /**
   * Output only. Location where the Spark job is executed. A location is selected by BigQuery for
   * jobs configured to run in a multi-region.
   * @return value or {@code null} for none
   */
  public java.lang.String getSparkJobLocation() {
    return sparkJobLocation;
  }

  /**
   * Output only. Location where the Spark job is executed. A location is selected by BigQuery for
   * jobs configured to run in a multi-region.
   * @param sparkJobLocation sparkJobLocation or {@code null} for none
   */
  public SparkStatistics setSparkJobLocation(java.lang.String sparkJobLocation) {
    this.sparkJobLocation = sparkJobLocation;
    return this;
  }

  @Override
  public SparkStatistics set(String fieldName, Object value) {
    return (SparkStatistics) super.set(fieldName, value);
  }

  @Override
  public SparkStatistics clone() {
    return (SparkStatistics) super.clone();
  }
}
// © 2015 - 2025 Weber Informatics LLC | Privacy Policy