All downloads are free. Search and download functionality uses the official Maven repository.

com.pulumi.azurenative.datafactory.kotlin.outputs.AzureDatabricksLinkedServiceResponse.kt Maven / Gradle / Ivy

@file:Suppress("NAME_SHADOWING", "DEPRECATION")

package com.pulumi.azurenative.datafactory.kotlin.outputs

import com.pulumi.core.Either
import kotlin.Any
import kotlin.String
import kotlin.Suppress
import kotlin.collections.List
import kotlin.collections.Map

/**
 * Azure Databricks linked service.
 * @property accessToken Access token for databricks REST API. Refer to https://docs.azuredatabricks.net/api/latest/authentication.html. Type: string (or Expression with resultType string).
 * @property annotations List of tags that can be used for describing the linked service.
 * @property authentication Required to specify MSI, if using Workspace resource id for databricks REST API. Type: string (or Expression with resultType string).
 * @property connectVia The integration runtime reference.
 * @property credential The credential reference containing authentication information.
 * @property description Linked service description.
 * @property domain .azuredatabricks.net, domain name of your Databricks deployment. Type: string (or Expression with resultType string).
 * @property encryptedCredential The encrypted credential used for authentication. Credentials are encrypted using the integration runtime credential manager. Type: string.
 * @property existingClusterId The id of an existing interactive cluster that will be used for all runs of this activity. Type: string (or Expression with resultType string).
 * @property instancePoolId The id of an existing instance pool that will be used for all runs of this activity. Type: string (or Expression with resultType string).
 * @property newClusterCustomTags Additional tags for cluster resources. This property is ignored in instance pool configurations.
 * @property newClusterDriverNodeType The driver node type for the new job cluster. This property is ignored in instance pool configurations. Type: string (or Expression with resultType string).
 * @property newClusterEnableElasticDisk Enable the elastic disk on the new cluster. This property is now ignored, and takes the default elastic disk behavior in Databricks (elastic disks are always enabled). Type: boolean (or Expression with resultType boolean).
 * @property newClusterInitScripts User-defined initialization scripts for the new cluster. Type: array of strings (or Expression with resultType array of strings).
 * @property newClusterLogDestination Specify a location to deliver Spark driver, worker, and event logs. Type: string (or Expression with resultType string).
 * @property newClusterNodeType The node type of the new job cluster. This property is required if newClusterVersion is specified and instancePoolId is not specified. If instancePoolId is specified, this property is ignored. Type: string (or Expression with resultType string).
 * @property newClusterNumOfWorker If not using an existing interactive cluster, this specifies the number of worker nodes to use for the new job cluster or instance pool. For new job clusters, this a string-formatted Int32, like '1' means numOfWorker is 1 or '1:10' means auto-scale from 1 (min) to 10 (max). For instance pools, this is a string-formatted Int32, and can only specify a fixed number of worker nodes, such as '2'. Required if newClusterVersion is specified. Type: string (or Expression with resultType string).
 * @property newClusterSparkConf A set of optional, user-specified Spark configuration key-value pairs.
 * @property newClusterSparkEnvVars A set of optional, user-specified Spark environment variables key-value pairs.
 * @property newClusterVersion If not using an existing interactive cluster, this specifies the Spark version of a new job cluster or instance pool nodes created for each run of this activity. Required if instancePoolId is specified. Type: string (or Expression with resultType string).
 * @property parameters Parameters for linked service.
 * @property policyId The policy id for limiting the ability to configure clusters based on a user defined set of rules. Type: string (or Expression with resultType string).
 * @property type Type of linked service.
 * Expected value is 'AzureDatabricks'.
 * @property version Version of the linked service.
 * @property workspaceResourceId Workspace resource id for databricks REST API. Type: string (or Expression with resultType string).
 */
public data class AzureDatabricksLinkedServiceResponse(
    // Either AzureKeyVaultSecretReferenceResponse or SecureStringResponse, matching the
    // two transform branches in toKotlin below.
    public val accessToken: Either<AzureKeyVaultSecretReferenceResponse, SecureStringResponse>? =
        null,
    public val annotations: List<Any>? = null,
    public val authentication: Any? = null,
    public val connectVia: IntegrationRuntimeReferenceResponse? = null,
    public val credential: CredentialReferenceResponse? = null,
    public val description: String? = null,
    // Required: domain is the only non-nullable dynamic property of this linked service.
    public val domain: Any,
    public val encryptedCredential: String? = null,
    public val existingClusterId: Any? = null,
    public val instancePoolId: Any? = null,
    public val newClusterCustomTags: Map<String, Any>? = null,
    public val newClusterDriverNodeType: Any? = null,
    public val newClusterEnableElasticDisk: Any? = null,
    public val newClusterInitScripts: Any? = null,
    public val newClusterLogDestination: Any? = null,
    public val newClusterNodeType: Any? = null,
    public val newClusterNumOfWorker: Any? = null,
    public val newClusterSparkConf: Map<String, Any>? = null,
    public val newClusterSparkEnvVars: Map<String, Any>? = null,
    public val newClusterVersion: Any? = null,
    public val parameters: Map<String, ParameterSpecificationResponse>? = null,
    // Discriminator; expected value is 'AzureDatabricks'.
    public val type: String,
    public val version: String? = null,
    public val workspaceResourceId: Any? = null,
) {
    public companion object {
        /**
         * Converts the generated Java SDK output type into its Kotlin counterpart,
         * unwrapping `Optional` wrappers to nullable values and recursively converting
         * nested response types.
         */
        public fun toKotlin(javaType: com.pulumi.azurenative.datafactory.outputs.AzureDatabricksLinkedServiceResponse): AzureDatabricksLinkedServiceResponse = AzureDatabricksLinkedServiceResponse(
            accessToken = javaType.accessToken().map({ args0 ->
                args0.transform(
                    { args0 ->
                        args0.let({ args0 ->
                            com.pulumi.azurenative.datafactory.kotlin.outputs.AzureKeyVaultSecretReferenceResponse.Companion.toKotlin(args0)
                        })
                    },
                    { args0 ->
                        args0.let({ args0 ->
                            com.pulumi.azurenative.datafactory.kotlin.outputs.SecureStringResponse.Companion.toKotlin(args0)
                        })
                    },
                )
            }).orElse(null),
            annotations = javaType.annotations().map({ args0 -> args0 }),
            authentication = javaType.authentication().map({ args0 -> args0 }).orElse(null),
            connectVia = javaType.connectVia().map({ args0 ->
                args0.let({ args0 ->
                    com.pulumi.azurenative.datafactory.kotlin.outputs.IntegrationRuntimeReferenceResponse.Companion.toKotlin(args0)
                })
            }).orElse(null),
            credential = javaType.credential().map({ args0 ->
                args0.let({ args0 ->
                    com.pulumi.azurenative.datafactory.kotlin.outputs.CredentialReferenceResponse.Companion.toKotlin(args0)
                })
            }).orElse(null),
            description = javaType.description().map({ args0 -> args0 }).orElse(null),
            domain = javaType.domain(),
            encryptedCredential = javaType.encryptedCredential().map({ args0 -> args0 }).orElse(null),
            existingClusterId = javaType.existingClusterId().map({ args0 -> args0 }).orElse(null),
            instancePoolId = javaType.instancePoolId().map({ args0 -> args0 }).orElse(null),
            newClusterCustomTags = javaType.newClusterCustomTags().map({ args0 ->
                args0.key.to(args0.value)
            }).toMap(),
            newClusterDriverNodeType = javaType.newClusterDriverNodeType().map({ args0 -> args0 }).orElse(null),
            newClusterEnableElasticDisk = javaType.newClusterEnableElasticDisk().map({ args0 ->
                args0
            }).orElse(null),
            newClusterInitScripts = javaType.newClusterInitScripts().map({ args0 -> args0 }).orElse(null),
            newClusterLogDestination = javaType.newClusterLogDestination().map({ args0 -> args0 }).orElse(null),
            newClusterNodeType = javaType.newClusterNodeType().map({ args0 -> args0 }).orElse(null),
            newClusterNumOfWorker = javaType.newClusterNumOfWorker().map({ args0 -> args0 }).orElse(null),
            newClusterSparkConf = javaType.newClusterSparkConf().map({ args0 ->
                args0.key.to(args0.value)
            }).toMap(),
            newClusterSparkEnvVars = javaType.newClusterSparkEnvVars().map({ args0 ->
                args0.key.to(args0.value)
            }).toMap(),
            newClusterVersion = javaType.newClusterVersion().map({ args0 -> args0 }).orElse(null),
            parameters = javaType.parameters().map({ args0 ->
                args0.key.to(
                    args0.value.let({ args0 ->
                        com.pulumi.azurenative.datafactory.kotlin.outputs.ParameterSpecificationResponse.Companion.toKotlin(args0)
                    }),
                )
            }).toMap(),
            policyId = javaType.policyId().map({ args0 -> args0 }).orElse(null),
            type = javaType.type(),
            version = javaType.version().map({ args0 -> args0 }).orElse(null),
            workspaceResourceId = javaType.workspaceResourceId().map({ args0 -> args0 }).orElse(null),
        )
    }
}




© 2015 - 2025 Weber Informatics LLC | Privacy Policy