/*
* Copyright 2010 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
/*
* This code was generated by https://github.com/google/apis-client-generator/
* (build: 2018-05-04 17:28:03 UTC)
* on 2018-09-12 at 16:10:30 UTC
* Modify at your own risk.
*/
package com.google.api.services.dataproc.v1beta2.model;
/**
* A Cloud Dataproc job for running Apache PySpark
* (https://spark.apache.org/docs/0.9.0/python-programming-guide.html) applications on YARN.
*
* This is the Java data model class that specifies how to parse/serialize into the JSON that is
* transmitted over HTTP when working with the Cloud Dataproc API. For a detailed explanation see:
* https://developers.google.com/api-client-library/java/google-http-java-client/json
*
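* <p>A minimal usage sketch (the gs:// paths are illustrative placeholders, and the example
* assumes the google-http-client-jackson2 artifact is on the classpath for the JSON factory):
* <pre>{@code
* PySparkJob job = new PySparkJob()
*     .setMainPythonFileUri("gs://my-bucket/word_count.py")
*     .setArgs(java.util.Arrays.asList("gs://my-bucket/input.txt"))
*     .setPythonFileUris(java.util.Collections.singletonList("gs://my-bucket/helpers.zip"));
* // Attach a JSON factory so toString() serializes the model to the wire format.
* job.setFactory(com.google.api.client.json.jackson2.JacksonFactory.getDefaultInstance());
* String json = job.toString();  // the JSON transmitted over HTTP
* }</pre>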
*
* @author Google, Inc.
*/
@SuppressWarnings("javadoc")
public final class PySparkJob extends com.google.api.client.json.GenericJson {
/**
* Optional. HCFS URIs of archives to be extracted into the working directory of each executor.
* Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.util.List<java.lang.String> archiveUris;
/**
* Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that
* can be set as job properties, since a collision may occur that causes an incorrect job
* submission.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.util.List<java.lang.String> args;
/**
* Optional. HCFS URIs of files to be copied to the working directory of Python drivers and
* distributed tasks. Useful for naively parallel tasks.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.util.List<java.lang.String> fileUris;
/**
* Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Python driver and tasks.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.util.List<java.lang.String> jarFileUris;
/**
* Optional. The runtime log config for job execution.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private LoggingConfig loggingConfig;
/**
* Required. The HCFS URI of the main Python file to use as the driver. Must be a .py file.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.lang.String mainPythonFileUri;
/**
* Optional. A mapping of property names to values, used to configure PySpark. Properties that
* conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties
* set in /etc/spark/conf/spark-defaults.conf and classes in user code.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.util.Map<String, java.lang.String> properties;
/**
* Optional. HCFS file URIs of Python files to pass to the PySpark framework. Supported file
* types: .py, .egg, and .zip.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.util.List<java.lang.String> pythonFileUris;
/**
* Optional. HCFS URIs of archives to be extracted into the working directory of each executor.
* Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
* @return value or {@code null} for none
*/
public java.util.List<java.lang.String> getArchiveUris() {
return archiveUris;
}
/**
* Optional. HCFS URIs of archives to be extracted into the working directory of each executor.
* Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
* @param archiveUris archiveUris or {@code null} for none
*/
public PySparkJob setArchiveUris(java.util.List<java.lang.String> archiveUris) {
this.archiveUris = archiveUris;
return this;
}
/**
* Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that
* can be set as job properties, since a collision may occur that causes an incorrect job
* submission.
* @return value or {@code null} for none
*/
public java.util.List<java.lang.String> getArgs() {
return args;
}
/**
* Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that
* can be set as job properties, since a collision may occur that causes an incorrect job
* submission.
* @param args args or {@code null} for none
*/
public PySparkJob setArgs(java.util.List<java.lang.String> args) {
this.args = args;
return this;
}
/**
* Optional. HCFS URIs of files to be copied to the working directory of Python drivers and
* distributed tasks. Useful for naively parallel tasks.
* @return value or {@code null} for none
*/
public java.util.List<java.lang.String> getFileUris() {
return fileUris;
}
/**
* Optional. HCFS URIs of files to be copied to the working directory of Python drivers and
* distributed tasks. Useful for naively parallel tasks.
* @param fileUris fileUris or {@code null} for none
*/
public PySparkJob setFileUris(java.util.List<java.lang.String> fileUris) {
this.fileUris = fileUris;
return this;
}
/**
* Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Python driver and tasks.
* @return value or {@code null} for none
*/
public java.util.List<java.lang.String> getJarFileUris() {
return jarFileUris;
}
/**
* Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Python driver and tasks.
* @param jarFileUris jarFileUris or {@code null} for none
*/
public PySparkJob setJarFileUris(java.util.List<java.lang.String> jarFileUris) {
this.jarFileUris = jarFileUris;
return this;
}
/**
* Optional. The runtime log config for job execution.
* @return value or {@code null} for none
*/
public LoggingConfig getLoggingConfig() {
return loggingConfig;
}
/**
* Optional. The runtime log config for job execution.
* @param loggingConfig loggingConfig or {@code null} for none
*/
public PySparkJob setLoggingConfig(LoggingConfig loggingConfig) {
this.loggingConfig = loggingConfig;
return this;
}
/**
* Required. The HCFS URI of the main Python file to use as the driver. Must be a .py file.
* @return value or {@code null} for none
*/
public java.lang.String getMainPythonFileUri() {
return mainPythonFileUri;
}
/**
* Required. The HCFS URI of the main Python file to use as the driver. Must be a .py file.
* @param mainPythonFileUri mainPythonFileUri or {@code null} for none
*/
public PySparkJob setMainPythonFileUri(java.lang.String mainPythonFileUri) {
this.mainPythonFileUri = mainPythonFileUri;
return this;
}
/**
* Optional. A mapping of property names to values, used to configure PySpark. Properties that
* conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties
* set in /etc/spark/conf/spark-defaults.conf and classes in user code.
* @return value or {@code null} for none
*/
public java.util.Map<String, java.lang.String> getProperties() {
return properties;
}
/**
* Optional. A mapping of property names to values, used to configure PySpark. Properties that
* conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties
* set in /etc/spark/conf/spark-defaults.conf and classes in user code.
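* <p>A short sketch (the property keys are standard Spark configuration names, shown here
* purely for illustration; {@code job} is an existing PySparkJob instance):
* <pre>{@code
* java.util.Map<String, java.lang.String> props = new java.util.HashMap<>();
* props.put("spark.executor.memory", "4g");
* props.put("spark.executor.cores", "2");
* job.setProperties(props);
* }</pre>
*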
* @param properties properties or {@code null} for none
*/
public PySparkJob setProperties(java.util.Map<String, java.lang.String> properties) {
this.properties = properties;
return this;
}
/**
* Optional. HCFS file URIs of Python files to pass to the PySpark framework. Supported file
* types: .py, .egg, and .zip.
* @return value or {@code null} for none
*/
public java.util.List<java.lang.String> getPythonFileUris() {
return pythonFileUris;
}
/**
* Optional. HCFS file URIs of Python files to pass to the PySpark framework. Supported file
* types: .py, .egg, and .zip.
* @param pythonFileUris pythonFileUris or {@code null} for none
*/
public PySparkJob setPythonFileUris(java.util.List<java.lang.String> pythonFileUris) {
this.pythonFileUris = pythonFileUris;
return this;
}
@Override
public PySparkJob set(String fieldName, Object value) {
return (PySparkJob) super.set(fieldName, value);
}
@Override
public PySparkJob clone() {
return (PySparkJob) super.clone();
}
}
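/*
 * Submission sketch (hedged): a PySparkJob is embedded in a Job and submitted through the
 * generated v1beta2 client. Job, JobPlacement, and SubmitJobRequest live in this same model
 * package; "dataproc" is an assumed, already-constructed
 * com.google.api.services.dataproc.v1beta2.Dataproc client, and the project, region, and
 * cluster names are placeholders.
 *
 * Job dataprocJob = new Job()
 *     .setPlacement(new JobPlacement().setClusterName("my-cluster"))
 *     .setPysparkJob(job);
 * dataproc.projects().regions().jobs()
 *     .submit("my-project", "us-central1", new SubmitJobRequest().setJob(dataprocJob))
 *     .execute();
 */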