All Downloads are FREE. Search and download functionalities are using the official Maven repository.

com.pulumi.azurenative.media.kotlin.inputs.VideoAnalyzerPresetArgs.kt Maven / Gradle / Ivy

Go to download

Build cloud applications and infrastructure by combining the safety and reliability of infrastructure as code with the power of the Kotlin programming language.

There is a newer version: 2.82.0.0
Show newest version
@file:Suppress("NAME_SHADOWING", "DEPRECATION")

package com.pulumi.azurenative.media.kotlin.inputs

import com.pulumi.azurenative.media.inputs.VideoAnalyzerPresetArgs.builder
import com.pulumi.azurenative.media.kotlin.enums.AudioAnalysisMode
import com.pulumi.azurenative.media.kotlin.enums.InsightsType
import com.pulumi.core.Either
import com.pulumi.core.Output
import com.pulumi.core.Output.of
import com.pulumi.kotlin.ConvertibleToJava
import com.pulumi.kotlin.PulumiNullFieldException
import com.pulumi.kotlin.PulumiTagMarker
import kotlin.Pair
import kotlin.String
import kotlin.Suppress
import kotlin.collections.Map
import kotlin.jvm.JvmName

/**
 * A video analyzer preset that extracts insights (rich metadata) from both audio and video, and outputs a JSON format file.
 * @property audioLanguage The language for the audio payload in the input using the BCP-47 format of 'language tag-region' (e.g: 'en-US').  If you know the language of your content, it is recommended that you specify it. The language must be specified explicitly for AudioAnalysisMode::Basic, since automatic language detection is not included in basic mode. If the language isn't specified or set to null, automatic language detection will choose the first language detected and process with the selected language for the duration of the file. It does not currently support dynamically switching between languages after the first language is detected. The automatic detection works best with audio recordings with clearly discernable speech. If automatic detection fails to find the language, transcription would fallback to 'en-US'. The list of supported languages is available here: https://go.microsoft.com/fwlink/?linkid=2109463
 * @property experimentalOptions Dictionary containing key value pairs for parameters not exposed in the preset itself
 * @property insightsToExtract Defines the type of insights that you want the service to generate. The allowed values are 'AudioInsightsOnly', 'VideoInsightsOnly', and 'AllInsights'. The default is AllInsights. If you set this to AllInsights and the input is audio only, then only audio insights are generated. Similarly if the input is video only, then only video insights are generated. It is recommended that you not use AudioInsightsOnly if you expect some of your inputs to be video only; or use VideoInsightsOnly if you expect some of your inputs to be audio only. Your Jobs in such conditions would error out.
 * @property mode Determines the set of audio analysis operations to be performed. If unspecified, the Standard AudioAnalysisMode would be chosen.
 * @property odataType The discriminator for derived types.
 * Expected value is '#Microsoft.Media.VideoAnalyzerPreset'.
 */
public data class VideoAnalyzerPresetArgs(
    public val audioLanguage: Output<String>? = null,
    public val experimentalOptions: Output<Map<String, String>>? = null,
    public val insightsToExtract: Output<Either<String, InsightsType>>? = null,
    public val mode: Output<Either<String, AudioAnalysisMode>>? = null,
    public val odataType: Output<String>,
) : ConvertibleToJava<com.pulumi.azurenative.media.inputs.VideoAnalyzerPresetArgs> {
    /**
     * Converts this Kotlin-idiomatic args wrapper to the underlying Java SDK args type,
     * mapping each Kotlin enum / Either to its Java counterpart via `toJava()`.
     */
    override fun toJava(): com.pulumi.azurenative.media.inputs.VideoAnalyzerPresetArgs =
        com.pulumi.azurenative.media.inputs.VideoAnalyzerPresetArgs.builder()
            .audioLanguage(audioLanguage?.applyValue({ args0 -> args0 }))
            .experimentalOptions(
                experimentalOptions?.applyValue({ args0 ->
                    args0.map({ args0 ->
                        args0.key.to(args0.value)
                    }).toMap()
                }),
            )
            .insightsToExtract(
                insightsToExtract?.applyValue({ args0 ->
                    args0.transform(
                        { args0 -> args0 },
                        { args0 -> args0.let({ args0 -> args0.toJava() }) },
                    )
                }),
            )
            .mode(
                mode?.applyValue({ args0 ->
                    args0.transform({ args0 -> args0 }, { args0 ->
                        args0.let({ args0 ->
                            args0.toJava()
                        })
                    })
                }),
            )
            .odataType(odataType.applyValue({ args0 -> args0 })).build()
}

/**
 * Builder for [VideoAnalyzerPresetArgs].
 */
@PulumiTagMarker
public class VideoAnalyzerPresetArgsBuilder internal constructor() {
    private var audioLanguage: Output<String>? = null

    private var experimentalOptions: Output<Map<String, String>>? = null

    private var insightsToExtract: Output<Either<String, InsightsType>>? = null

    private var mode: Output<Either<String, AudioAnalysisMode>>? = null

    private var odataType: Output<String>? = null

    /**
     * @param value The language for the audio payload in the input using the BCP-47 format of 'language tag-region' (e.g: 'en-US').  If you know the language of your content, it is recommended that you specify it. The language must be specified explicitly for AudioAnalysisMode::Basic, since automatic language detection is not included in basic mode. If the language isn't specified or set to null, automatic language detection will choose the first language detected and process with the selected language for the duration of the file. It does not currently support dynamically switching between languages after the first language is detected. The automatic detection works best with audio recordings with clearly discernable speech. If automatic detection fails to find the language, transcription would fallback to 'en-US'. The list of supported languages is available here: https://go.microsoft.com/fwlink/?linkid=2109463
     */
    @JvmName("vlmseshboopoucgv")
    public suspend fun audioLanguage(`value`: Output<String>) {
        this.audioLanguage = value
    }

    /**
     * @param value Dictionary containing key value pairs for parameters not exposed in the preset itself
     */
    @JvmName("fvxfrdpimcoigvcd")
    public suspend fun experimentalOptions(`value`: Output<Map<String, String>>) {
        this.experimentalOptions = value
    }

    /**
     * @param value Defines the type of insights that you want the service to generate. The allowed values are 'AudioInsightsOnly', 'VideoInsightsOnly', and 'AllInsights'. The default is AllInsights. If you set this to AllInsights and the input is audio only, then only audio insights are generated. Similarly if the input is video only, then only video insights are generated. It is recommended that you not use AudioInsightsOnly if you expect some of your inputs to be video only; or use VideoInsightsOnly if you expect some of your inputs to be audio only. Your Jobs in such conditions would error out.
     */
    @JvmName("gqjdlfwkjxodvkev")
    public suspend fun insightsToExtract(`value`: Output<Either<String, InsightsType>>) {
        this.insightsToExtract = value
    }

    /**
     * @param value Determines the set of audio analysis operations to be performed. If unspecified, the Standard AudioAnalysisMode would be chosen.
     */
    @JvmName("qelfjwvyxvqchnsy")
    public suspend fun mode(`value`: Output<Either<String, AudioAnalysisMode>>) {
        this.mode = value
    }

    /**
     * @param value The discriminator for derived types.
     * Expected value is '#Microsoft.Media.VideoAnalyzerPreset'.
     */
    @JvmName("wqracjttnphhsnnk")
    public suspend fun odataType(`value`: Output<String>) {
        this.odataType = value
    }

    /**
     * @param value The language for the audio payload in the input using the BCP-47 format of 'language tag-region' (e.g: 'en-US').  If you know the language of your content, it is recommended that you specify it. The language must be specified explicitly for AudioAnalysisMode::Basic, since automatic language detection is not included in basic mode. If the language isn't specified or set to null, automatic language detection will choose the first language detected and process with the selected language for the duration of the file. It does not currently support dynamically switching between languages after the first language is detected. The automatic detection works best with audio recordings with clearly discernable speech. If automatic detection fails to find the language, transcription would fallback to 'en-US'. The list of supported languages is available here: https://go.microsoft.com/fwlink/?linkid=2109463
     */
    @JvmName("gvlmmyaptlyutyen")
    public suspend fun audioLanguage(`value`: String?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.audioLanguage = mapped
    }

    /**
     * @param value Dictionary containing key value pairs for parameters not exposed in the preset itself
     */
    @JvmName("skfudfludoihbrre")
    public suspend fun experimentalOptions(`value`: Map<String, String>?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.experimentalOptions = mapped
    }

    /**
     * @param values Dictionary containing key value pairs for parameters not exposed in the preset itself
     */
    @JvmName("rrgrkuteucamubvd")
    public fun experimentalOptions(vararg values: Pair<String, String>) {
        val toBeMapped = values.toMap()
        val mapped = toBeMapped.let({ args0 -> of(args0) })
        this.experimentalOptions = mapped
    }

    /**
     * @param value Defines the type of insights that you want the service to generate. The allowed values are 'AudioInsightsOnly', 'VideoInsightsOnly', and 'AllInsights'. The default is AllInsights. If you set this to AllInsights and the input is audio only, then only audio insights are generated. Similarly if the input is video only, then only video insights are generated. It is recommended that you not use AudioInsightsOnly if you expect some of your inputs to be video only; or use VideoInsightsOnly if you expect some of your inputs to be audio only. Your Jobs in such conditions would error out.
     */
    @JvmName("jyjucjmkbjftfpty")
    public suspend fun insightsToExtract(`value`: Either<String, InsightsType>?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.insightsToExtract = mapped
    }

    /**
     * @param value Defines the type of insights that you want the service to generate. The allowed values are 'AudioInsightsOnly', 'VideoInsightsOnly', and 'AllInsights'. The default is AllInsights. If you set this to AllInsights and the input is audio only, then only audio insights are generated. Similarly if the input is video only, then only video insights are generated. It is recommended that you not use AudioInsightsOnly if you expect some of your inputs to be video only; or use VideoInsightsOnly if you expect some of your inputs to be audio only. Your Jobs in such conditions would error out.
     */
    @JvmName("autnvosyrwtbnjrx")
    public fun insightsToExtract(`value`: String) {
        val toBeMapped = Either.ofLeft<String, InsightsType>(value)
        val mapped = toBeMapped.let({ args0 -> of(args0) })
        this.insightsToExtract = mapped
    }

    /**
     * @param value Defines the type of insights that you want the service to generate. The allowed values are 'AudioInsightsOnly', 'VideoInsightsOnly', and 'AllInsights'. The default is AllInsights. If you set this to AllInsights and the input is audio only, then only audio insights are generated. Similarly if the input is video only, then only video insights are generated. It is recommended that you not use AudioInsightsOnly if you expect some of your inputs to be video only; or use VideoInsightsOnly if you expect some of your inputs to be audio only. Your Jobs in such conditions would error out.
     */
    @JvmName("niaqwkferowemgik")
    public fun insightsToExtract(`value`: InsightsType) {
        val toBeMapped = Either.ofRight<String, InsightsType>(value)
        val mapped = toBeMapped.let({ args0 -> of(args0) })
        this.insightsToExtract = mapped
    }

    /**
     * @param value Determines the set of audio analysis operations to be performed. If unspecified, the Standard AudioAnalysisMode would be chosen.
     */
    @JvmName("qdeeunqviyglhdlf")
    public suspend fun mode(`value`: Either<String, AudioAnalysisMode>?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.mode = mapped
    }

    /**
     * @param value Determines the set of audio analysis operations to be performed. If unspecified, the Standard AudioAnalysisMode would be chosen.
     */
    @JvmName("sbhbdjgspyoudefn")
    public fun mode(`value`: String) {
        val toBeMapped = Either.ofLeft<String, AudioAnalysisMode>(value)
        val mapped = toBeMapped.let({ args0 -> of(args0) })
        this.mode = mapped
    }

    /**
     * @param value Determines the set of audio analysis operations to be performed. If unspecified, the Standard AudioAnalysisMode would be chosen.
     */
    @JvmName("coxlxajfglujmpcl")
    public fun mode(`value`: AudioAnalysisMode) {
        val toBeMapped = Either.ofRight<String, AudioAnalysisMode>(value)
        val mapped = toBeMapped.let({ args0 -> of(args0) })
        this.mode = mapped
    }

    /**
     * @param value The discriminator for derived types.
     * Expected value is '#Microsoft.Media.VideoAnalyzerPreset'.
     */
    @JvmName("fielapgktbhkciah")
    public suspend fun odataType(`value`: String) {
        val toBeMapped = value
        val mapped = toBeMapped.let({ args0 -> of(args0) })
        this.odataType = mapped
    }

    /**
     * Assembles the final [VideoAnalyzerPresetArgs].
     *
     * @throws PulumiNullFieldException if the required discriminator [odataType] was never set.
     */
    internal fun build(): VideoAnalyzerPresetArgs = VideoAnalyzerPresetArgs(
        audioLanguage = audioLanguage,
        experimentalOptions = experimentalOptions,
        insightsToExtract = insightsToExtract,
        mode = mode,
        odataType = odataType ?: throw PulumiNullFieldException("odataType"),
    )
}




© 2015 - 2025 Weber Informatics LLC | Privacy Policy