com.pulumi.azure.datafactory.kotlin.outputs.LinkedServiceAzureDatabricksNewClusterConfig.kt Maven / Gradle / Ivy
Go to download
Show more of this group Show more artifacts with this name
Show all versions of pulumi-azure-kotlin Show documentation
Show all versions of pulumi-azure-kotlin Show documentation
Build cloud applications and infrastructure by combining the safety and reliability of infrastructure as code with the power of the Kotlin programming language.
@file:Suppress("NAME_SHADOWING", "DEPRECATION")
package com.pulumi.azure.datafactory.kotlin.outputs
import kotlin.Int
import kotlin.String
import kotlin.Suppress
import kotlin.collections.List
import kotlin.collections.Map
/**
 * Configuration of the new Databricks cluster that an Azure Data Factory
 * Databricks linked service creates on demand.
 *
 * All collection properties are read-only snapshots converted from the
 * underlying Java SDK type via [toKotlin].
 *
 * @property clusterVersion Spark version of the cluster.
 * @property customTags Tags for the cluster resource.
 * @property driverNodeType Driver node type for the cluster.
 * @property initScripts User defined initialization scripts for the cluster.
 * @property logDestination Location to deliver Spark driver, worker, and event logs.
 * @property maxNumberOfWorkers Specifies the maximum number of worker nodes. It should be between 1 and 25000.
 * @property minNumberOfWorkers Specifies the minimum number of worker nodes. It should be between 1 and 25000. It defaults to `1`.
 * @property nodeType Node type for the new cluster.
 * @property sparkConfig User-specified Spark configuration variables key-value pairs.
 * @property sparkEnvironmentVariables User-specified Spark environment variables key-value pairs.
 */
public data class LinkedServiceAzureDatabricksNewClusterConfig(
    public val clusterVersion: String,
    // NOTE(review): the raw `Map?`/`List?` declarations were invalid Kotlin (type
    // arguments lost in extraction); restored as String-keyed maps / String list,
    // which is what the entry-to-Pair conversion in toKotlin() below produces.
    public val customTags: Map<String, String>? = null,
    public val driverNodeType: String? = null,
    public val initScripts: List<String>? = null,
    public val logDestination: String? = null,
    public val maxNumberOfWorkers: Int? = null,
    public val minNumberOfWorkers: Int? = null,
    public val nodeType: String,
    public val sparkConfig: Map<String, String>? = null,
    public val sparkEnvironmentVariables: Map<String, String>? = null,
) {
    public companion object {
        /**
         * Converts the Java SDK representation of this output type into its
         * Kotlin counterpart.
         *
         * Optional scalar fields use `orElse(null)` to surface absence as
         * Kotlin `null`; map-valued fields are copied entry-by-entry into new
         * immutable Kotlin maps.
         *
         * @param javaType the Java SDK output object to convert.
         * @return an equivalent [LinkedServiceAzureDatabricksNewClusterConfig].
         */
        public fun toKotlin(javaType: com.pulumi.azure.datafactory.outputs.LinkedServiceAzureDatabricksNewClusterConfig):
            LinkedServiceAzureDatabricksNewClusterConfig = LinkedServiceAzureDatabricksNewClusterConfig(
            clusterVersion = javaType.clusterVersion(),
            customTags = javaType.customTags().map({ args0 -> args0.key.to(args0.value) }).toMap(),
            driverNodeType = javaType.driverNodeType().map({ args0 -> args0 }).orElse(null),
            initScripts = javaType.initScripts().map({ args0 -> args0 }),
            logDestination = javaType.logDestination().map({ args0 -> args0 }).orElse(null),
            maxNumberOfWorkers = javaType.maxNumberOfWorkers().map({ args0 -> args0 }).orElse(null),
            minNumberOfWorkers = javaType.minNumberOfWorkers().map({ args0 -> args0 }).orElse(null),
            nodeType = javaType.nodeType(),
            sparkConfig = javaType.sparkConfig().map({ args0 -> args0.key.to(args0.value) }).toMap(),
            sparkEnvironmentVariables = javaType.sparkEnvironmentVariables().map({ args0 ->
                args0.key.to(args0.value)
            }).toMap(),
        )
    }
}
© 2015 - 2025 Weber Informatics LLC | Privacy Policy