// Build cloud applications and infrastructure by combining the safety and reliability of
// infrastructure as code with the power of the Kotlin programming language.
@file:Suppress("NAME_SHADOWING", "DEPRECATION")
package com.pulumi.gcp.dataproc.kotlin.inputs
import com.pulumi.core.Output
import com.pulumi.core.Output.of
import com.pulumi.gcp.dataproc.inputs.BatchSparkBatchArgs.builder
import com.pulumi.kotlin.ConvertibleToJava
import com.pulumi.kotlin.PulumiTagMarker
import kotlin.String
import kotlin.Suppress
import kotlin.collections.List
import kotlin.jvm.JvmName
/**
 * Spark batch configuration for a Dataproc serverless batch workload.
 *
 * @property archiveUris HCFS URIs of archives to be extracted into the working directory of each executor.
 * Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
 * @property args The arguments to pass to the driver. Do not include arguments that can be set as batch
 * properties, such as --conf, since a collision can occur that causes an incorrect batch submission.
 * @property fileUris HCFS URIs of files to be placed in the working directory of each executor.
 * @property jarFileUris HCFS URIs of jar files to add to the classpath of the Spark driver and tasks.
 * @property mainClass The name of the driver main class. The jar file that contains the class must be in the
 * classpath or specified in jarFileUris.
 * @property mainJarFileUri The HCFS URI of the jar file that contains the main class.
 */
public data class BatchSparkBatchArgs(
    public val archiveUris: Output<List<String>>? = null,
    public val args: Output<List<String>>? = null,
    public val fileUris: Output<List<String>>? = null,
    public val jarFileUris: Output<List<String>>? = null,
    public val mainClass: Output<String>? = null,
    public val mainJarFileUri: Output<String>? = null,
) : ConvertibleToJava<com.pulumi.gcp.dataproc.inputs.BatchSparkBatchArgs> {
    /**
     * Converts this Kotlin wrapper into the underlying Java builder-based args type.
     * Each set property is forwarded unchanged; unset (null) properties are skipped.
     */
    override fun toJava(): com.pulumi.gcp.dataproc.inputs.BatchSparkBatchArgs =
        com.pulumi.gcp.dataproc.inputs.BatchSparkBatchArgs.builder()
            .archiveUris(archiveUris?.applyValue({ args0 -> args0.map({ args0 -> args0 }) }))
            .args(args?.applyValue({ args0 -> args0.map({ args0 -> args0 }) }))
            .fileUris(fileUris?.applyValue({ args0 -> args0.map({ args0 -> args0 }) }))
            .jarFileUris(jarFileUris?.applyValue({ args0 -> args0.map({ args0 -> args0 }) }))
            .mainClass(mainClass?.applyValue({ args0 -> args0 }))
            .mainJarFileUri(mainJarFileUri?.applyValue({ args0 -> args0 }))
            .build()
}
/**
* Builder for [BatchSparkBatchArgs].
*/
@PulumiTagMarker
public class BatchSparkBatchArgsBuilder internal constructor() {
private var archiveUris: Output>? = null
private var args: Output>? = null
private var fileUris: Output>? = null
private var jarFileUris: Output>? = null
private var mainClass: Output? = null
private var mainJarFileUri: Output? = null
/**
* @param value HCFS URIs of archives to be extracted into the working directory of each executor.
* Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
*/
@JvmName("xcddsjhnxtymhqcj")
public suspend fun archiveUris(`value`: Output>) {
this.archiveUris = value
}
@JvmName("fkkkcpgkhotnahlh")
public suspend fun archiveUris(vararg values: Output) {
this.archiveUris = Output.all(values.asList())
}
/**
* @param values HCFS URIs of archives to be extracted into the working directory of each executor.
* Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
*/
@JvmName("saniejysfvlksndc")
public suspend fun archiveUris(values: List