/*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
/*
* This code was generated by https://github.com/googleapis/google-api-java-client-services/
* Modify at your own risk.
*/
package com.google.api.services.dataproc.model;
/**
* A configuration for running an Apache PySpark
* (https://spark.apache.org/docs/latest/api/python/getting_started/quickstart.html) batch workload.
*
* This is the Java data model class that specifies how to parse/serialize into the JSON that is
* transmitted over HTTP when working with the Cloud Dataproc API. For a detailed explanation see:
* https://developers.google.com/api-client-library/java/google-http-java-client/json
*
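 * <p>As a minimal, illustrative sketch (the bucket paths, file names, and arguments below are
 * hypothetical placeholders, not values defined by this API; serializing the model assumes the
 * google-http-client-gson artifact is on the classpath), a batch can be configured by chaining
 * the setters defined on this class:
 * <pre>{@code
 * PySparkBatch batch = new PySparkBatch()
 *     .setMainPythonFileUri("gs://example-bucket/jobs/main.py")  // required: the driver .py file
 *     .setPythonFileUris(java.util.Arrays.asList("gs://example-bucket/libs/helpers.zip"))
 *     .setArgs(java.util.Arrays.asList("--input", "gs://example-bucket/data/"));
 * // Render the model as the JSON that would be transmitted over HTTP.
 * String json = com.google.api.client.json.gson.GsonFactory.getDefaultInstance().toString(batch);
 * }</pre>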
*
* @author Google, Inc.
*/
@SuppressWarnings("javadoc")
public final class PySparkBatch extends com.google.api.client.json.GenericJson {
/**
* Optional. HCFS URIs of archives to be extracted into the working directory of each executor.
* Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.util.List<java.lang.String> archiveUris;
/**
* Optional. The arguments to pass to the driver. Do not include arguments that can be set as
* batch properties, such as --conf, since a collision can occur that causes an incorrect batch
* submission.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.util.List<java.lang.String> args;
/**
* Optional. HCFS URIs of files to be placed in the working directory of each executor.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.util.List<java.lang.String> fileUris;
/**
* Optional. HCFS URIs of jar files to add to the classpath of the Spark driver and tasks.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.util.List<java.lang.String> jarFileUris;
/**
* Required. The HCFS URI of the main Python file to use as the Spark driver. Must be a .py file.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.lang.String mainPythonFileUri;
/**
* Optional. HCFS file URIs of Python files to pass to the PySpark framework. Supported file
* types: .py, .egg, and .zip.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.util.List<java.lang.String> pythonFileUris;
/**
* Optional. HCFS URIs of archives to be extracted into the working directory of each executor.
* Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
* @return value or {@code null} for none
*/
public java.util.List<java.lang.String> getArchiveUris() {
return archiveUris;
}
/**
* Optional. HCFS URIs of archives to be extracted into the working directory of each executor.
* Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
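 *
 * <p>A brief illustration (the bucket and archive name are hypothetical):
 * <pre>{@code
 * batch.setArchiveUris(java.util.Arrays.asList("gs://example-bucket/deps/env.tar.gz"));
 * }</pre>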
* @param archiveUris archiveUris or {@code null} for none
*/
public PySparkBatch setArchiveUris(java.util.List<java.lang.String> archiveUris) {
this.archiveUris = archiveUris;
return this;
}
/**
* Optional. The arguments to pass to the driver. Do not include arguments that can be set as
* batch properties, such as --conf, since a collision can occur that causes an incorrect batch
* submission.
* @return value or {@code null} for none
*/
public java.util.List<java.lang.String> getArgs() {
return args;
}
/**
* Optional. The arguments to pass to the driver. Do not include arguments that can be set as
* batch properties, such as --conf, since a collision can occur that causes an incorrect batch
* submission.
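 *
 * <p>For instance (the argument values are hypothetical), pass only application-level arguments
 * here, and set Spark properties as batch properties rather than via {@code --conf}:
 * <pre>{@code
 * batch.setArgs(java.util.Arrays.asList("--input", "gs://example-bucket/in", "--verbose"));
 * }</pre>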
* @param args args or {@code null} for none
*/
public PySparkBatch setArgs(java.util.List<java.lang.String> args) {
this.args = args;
return this;
}
/**
* Optional. HCFS URIs of files to be placed in the working directory of each executor.
* @return value or {@code null} for none
*/
public java.util.List<java.lang.String> getFileUris() {
return fileUris;
}
/**
* Optional. HCFS URIs of files to be placed in the working directory of each executor.
* @param fileUris fileUris or {@code null} for none
*/
public PySparkBatch setFileUris(java.util.List<java.lang.String> fileUris) {
this.fileUris = fileUris;
return this;
}
/**
* Optional. HCFS URIs of jar files to add to the classpath of the Spark driver and tasks.
* @return value or {@code null} for none
*/
public java.util.List<java.lang.String> getJarFileUris() {
return jarFileUris;
}
/**
* Optional. HCFS URIs of jar files to add to the classpath of the Spark driver and tasks.
* @param jarFileUris jarFileUris or {@code null} for none
*/
public PySparkBatch setJarFileUris(java.util.List<java.lang.String> jarFileUris) {
this.jarFileUris = jarFileUris;
return this;
}
/**
* Required. The HCFS URI of the main Python file to use as the Spark driver. Must be a .py file.
* @return value or {@code null} for none
*/
public java.lang.String getMainPythonFileUri() {
return mainPythonFileUri;
}
/**
* Required. The HCFS URI of the main Python file to use as the Spark driver. Must be a .py file.
* @param mainPythonFileUri mainPythonFileUri or {@code null} for none
*/
public PySparkBatch setMainPythonFileUri(java.lang.String mainPythonFileUri) {
this.mainPythonFileUri = mainPythonFileUri;
return this;
}
/**
* Optional. HCFS file URIs of Python files to pass to the PySpark framework. Supported file
* types: .py, .egg, and .zip.
* @return value or {@code null} for none
*/
public java.util.List<java.lang.String> getPythonFileUris() {
return pythonFileUris;
}
/**
* Optional. HCFS file URIs of Python files to pass to the PySpark framework. Supported file
* types: .py, .egg, and .zip.
* @param pythonFileUris pythonFileUris or {@code null} for none
*/
public PySparkBatch setPythonFileUris(java.util.List<java.lang.String> pythonFileUris) {
this.pythonFileUris = pythonFileUris;
return this;
}
@Override
public PySparkBatch set(String fieldName, Object value) {
return (PySparkBatch) super.set(fieldName, value);
}
@Override
public PySparkBatch clone() {
return (PySparkBatch) super.clone();
}
}