commonMain.aws.sdk.kotlin.services.chimesdkmediapipelines.model.AmazonTranscribeProcessorConfiguration.kt

// Code generated by smithy-kotlin-codegen. DO NOT EDIT!

package aws.sdk.kotlin.services.chimesdkmediapipelines.model

import aws.smithy.kotlin.runtime.SdkDsl

/**
 * A structure that contains the configuration settings for an Amazon Transcribe processor.
 *
 * Calls to this API must include a `LanguageCode`, `IdentifyLanguage`, or `IdentifyMultipleLanguages` parameter. If you include more than one of those parameters, your transcription job fails.
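 *
 * A minimal construction sketch using the builder DSL below; the enum entry names shown are assumed for illustration and are not taken from this file:
 *
 * ```kotlin
 * val transcribeConfig = AmazonTranscribeProcessorConfiguration {
 *     languageCode = CallAnalyticsLanguageCode.EnUs // assumed entry name
 *     enablePartialResultsStabilization = true
 *     partialResultsStability = PartialResultsStability.High // assumed entry name
 *     contentRedactionType = ContentType.Pii // assumed entry name
 *     piiEntityTypes = "SSN,CREDIT_DEBIT_NUMBER"
 * }
 * ```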
 */
public class AmazonTranscribeProcessorConfiguration private constructor(builder: Builder) {
    /**
     * Labels all personally identifiable information (PII) identified in your transcript.
     *
     * Content identification is performed at the segment level; PII specified in `PiiEntityTypes` is flagged upon complete transcription of an audio segment.
     *
     * You can’t set `ContentIdentificationType` and `ContentRedactionType` in the same request. If you set both, your request returns a `BadRequestException`.
     *
     * For more information, see [Redacting or identifying personally identifiable information](https://docs.aws.amazon.com/transcribe/latest/dg/pii-redaction.html) in the *Amazon Transcribe Developer Guide*.
     */
    public val contentIdentificationType: aws.sdk.kotlin.services.chimesdkmediapipelines.model.ContentType? = builder.contentIdentificationType
    /**
     * Redacts all personally identifiable information (PII) identified in your transcript.
     *
     * Content redaction is performed at the segment level; PII specified in `PiiEntityTypes` is redacted upon complete transcription of an audio segment.
     *
     * You can’t set `ContentRedactionType` and `ContentIdentificationType` in the same request. If you set both, your request returns a `BadRequestException`.
     *
     * For more information, see [Redacting or identifying personally identifiable information](https://docs.aws.amazon.com/transcribe/latest/dg/pii-redaction.html) in the *Amazon Transcribe Developer Guide*.
     */
    public val contentRedactionType: aws.sdk.kotlin.services.chimesdkmediapipelines.model.ContentType? = builder.contentRedactionType
    /**
     * Enables partial result stabilization for your transcription. Partial result stabilization can reduce latency in your output, but may impact accuracy.
     *
     * For more information, see [Partial-result stabilization](https://docs.aws.amazon.com/transcribe/latest/dg/streaming.html#streaming-partial-result-stabilization) in the *Amazon Transcribe Developer Guide*.
     */
    public val enablePartialResultsStabilization: kotlin.Boolean = builder.enablePartialResultsStabilization
    /**
     * If true, `TranscriptEvents` with `IsPartial: true` are filtered out of the insights target.
     */
    public val filterPartialResults: kotlin.Boolean = builder.filterPartialResults
    /**
     * Turns language identification on or off.
     */
    public val identifyLanguage: kotlin.Boolean = builder.identifyLanguage
    /**
     * Turns language identification on or off for multiple languages.
     *
     * Calls to this API must include a `LanguageCode`, `IdentifyLanguage`, or `IdentifyMultipleLanguages` parameter. If you include more than one of those parameters, your transcription job fails.
     */
    public val identifyMultipleLanguages: kotlin.Boolean = builder.identifyMultipleLanguages
    /**
     * The language code that represents the language spoken in your audio.
     *
     * If you're unsure of the language spoken in your audio, consider using `IdentifyLanguage` to enable automatic language identification.
     *
     * For a list of languages that real-time Call Analytics supports, see the [Supported languages table](https://docs.aws.amazon.com/transcribe/latest/dg/supported-languages.html) in the *Amazon Transcribe Developer Guide*.
     */
    public val languageCode: aws.sdk.kotlin.services.chimesdkmediapipelines.model.CallAnalyticsLanguageCode? = builder.languageCode
    /**
     * The name of the custom language model that you want to use when processing your transcription. Note that language model names are case sensitive.
     *
     * The language of the specified language model must match the language code you specify in your transcription request. If the languages don't match, the custom language model isn't applied. There are no errors or warnings associated with a language mismatch.
     *
     * For more information, see [Custom language models](https://docs.aws.amazon.com/transcribe/latest/dg/custom-language-models.html) in the *Amazon Transcribe Developer Guide*.
     */
    public val languageModelName: kotlin.String? = builder.languageModelName
    /**
     * The language options for the transcription, such as automatic language detection.
     */
    public val languageOptions: kotlin.String? = builder.languageOptions
    /**
     * The level of stability to use when you enable partial results stabilization (`EnablePartialResultsStabilization`).
     *
     * Low stability provides the highest accuracy. High stability transcribes faster, but with slightly lower accuracy.
     *
     * For more information, see [Partial-result stabilization](https://docs.aws.amazon.com/transcribe/latest/dg/streaming.html#streaming-partial-result-stabilization) in the *Amazon Transcribe Developer Guide*.
     */
    public val partialResultsStability: aws.sdk.kotlin.services.chimesdkmediapipelines.model.PartialResultsStability? = builder.partialResultsStability
    /**
     * The types of personally identifiable information (PII) to redact from a transcript. You can include as many types as you'd like, or you can select `ALL`.
     *
     * To include `PiiEntityTypes` in your Call Analytics request, you must also include `ContentIdentificationType` or `ContentRedactionType`, but you can't include both.
     *
     * Values must be comma-separated and can include: `ADDRESS`, `BANK_ACCOUNT_NUMBER`, `BANK_ROUTING`, `CREDIT_DEBIT_CVV`, `CREDIT_DEBIT_EXPIRY`, `CREDIT_DEBIT_NUMBER`, `EMAIL`, `NAME`, `PHONE`, `PIN`, `SSN`, or `ALL`.
     *
     * If you leave this parameter empty, the default behavior is equivalent to `ALL`.
     */
    public val piiEntityTypes: kotlin.String? = builder.piiEntityTypes
    /**
     * The preferred language for the transcription.
     */
    public val preferredLanguage: aws.sdk.kotlin.services.chimesdkmediapipelines.model.CallAnalyticsLanguageCode? = builder.preferredLanguage
    /**
     * Enables speaker partitioning (diarization) in your transcription output. Speaker partitioning labels the speech from individual speakers in your media file.
     *
     * For more information, see [Partitioning speakers (diarization)](https://docs.aws.amazon.com/transcribe/latest/dg/diarization.html) in the *Amazon Transcribe Developer Guide*.
     */
    public val showSpeakerLabel: kotlin.Boolean = builder.showSpeakerLabel
    /**
     * The vocabulary filtering method used in your Call Analytics transcription.
     */
    public val vocabularyFilterMethod: aws.sdk.kotlin.services.chimesdkmediapipelines.model.VocabularyFilterMethod? = builder.vocabularyFilterMethod
    /**
     * The name of the custom vocabulary filter that you specified in your Call Analytics request.
     *
     * Length Constraints: Minimum length of 1. Maximum length of 200.
     */
    public val vocabularyFilterName: kotlin.String? = builder.vocabularyFilterName
    /**
     * The names of the custom vocabulary filter or filters used during transcription.
     */
    public val vocabularyFilterNames: kotlin.String? = builder.vocabularyFilterNames
    /**
     * The name of the custom vocabulary that you specified in your Call Analytics request.
     *
     * Length Constraints: Minimum length of 1. Maximum length of 200.
     */
    public val vocabularyName: kotlin.String? = builder.vocabularyName
    /**
     * The names of the custom vocabulary or vocabularies used during transcription.
     */
    public val vocabularyNames: kotlin.String? = builder.vocabularyNames

    public companion object {
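        /**
         * Builds a new [AmazonTranscribeProcessorConfiguration] by applying [block] to a fresh [Builder].
         */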
        public operator fun invoke(block: Builder.() -> kotlin.Unit): aws.sdk.kotlin.services.chimesdkmediapipelines.model.AmazonTranscribeProcessorConfiguration = Builder().apply(block).build()
    }

    override fun toString(): kotlin.String = buildString {
        append("AmazonTranscribeProcessorConfiguration(")
        append("contentIdentificationType=$contentIdentificationType,")
        append("contentRedactionType=$contentRedactionType,")
        append("enablePartialResultsStabilization=$enablePartialResultsStabilization,")
        append("filterPartialResults=$filterPartialResults,")
        append("identifyLanguage=$identifyLanguage,")
        append("identifyMultipleLanguages=$identifyMultipleLanguages,")
        append("languageCode=$languageCode,")
        append("languageModelName=$languageModelName,")
        append("languageOptions=$languageOptions,")
        append("partialResultsStability=$partialResultsStability,")
        append("piiEntityTypes=$piiEntityTypes,")
        append("preferredLanguage=$preferredLanguage,")
        append("showSpeakerLabel=$showSpeakerLabel,")
        append("vocabularyFilterMethod=$vocabularyFilterMethod,")
        append("vocabularyFilterName=$vocabularyFilterName,")
        append("vocabularyFilterNames=$vocabularyFilterNames,")
        append("vocabularyName=$vocabularyName,")
        append("vocabularyNames=$vocabularyNames")
        append(")")
    }

    override fun hashCode(): kotlin.Int {
        var result = contentIdentificationType?.hashCode() ?: 0
        result = 31 * result + (contentRedactionType?.hashCode() ?: 0)
        result = 31 * result + (enablePartialResultsStabilization.hashCode())
        result = 31 * result + (filterPartialResults.hashCode())
        result = 31 * result + (identifyLanguage.hashCode())
        result = 31 * result + (identifyMultipleLanguages.hashCode())
        result = 31 * result + (languageCode?.hashCode() ?: 0)
        result = 31 * result + (languageModelName?.hashCode() ?: 0)
        result = 31 * result + (languageOptions?.hashCode() ?: 0)
        result = 31 * result + (partialResultsStability?.hashCode() ?: 0)
        result = 31 * result + (piiEntityTypes?.hashCode() ?: 0)
        result = 31 * result + (preferredLanguage?.hashCode() ?: 0)
        result = 31 * result + (showSpeakerLabel.hashCode())
        result = 31 * result + (vocabularyFilterMethod?.hashCode() ?: 0)
        result = 31 * result + (vocabularyFilterName?.hashCode() ?: 0)
        result = 31 * result + (vocabularyFilterNames?.hashCode() ?: 0)
        result = 31 * result + (vocabularyName?.hashCode() ?: 0)
        result = 31 * result + (vocabularyNames?.hashCode() ?: 0)
        return result
    }

    override fun equals(other: kotlin.Any?): kotlin.Boolean {
        if (this === other) return true
        if (other == null || this::class != other::class) return false

        other as AmazonTranscribeProcessorConfiguration

        if (contentIdentificationType != other.contentIdentificationType) return false
        if (contentRedactionType != other.contentRedactionType) return false
        if (enablePartialResultsStabilization != other.enablePartialResultsStabilization) return false
        if (filterPartialResults != other.filterPartialResults) return false
        if (identifyLanguage != other.identifyLanguage) return false
        if (identifyMultipleLanguages != other.identifyMultipleLanguages) return false
        if (languageCode != other.languageCode) return false
        if (languageModelName != other.languageModelName) return false
        if (languageOptions != other.languageOptions) return false
        if (partialResultsStability != other.partialResultsStability) return false
        if (piiEntityTypes != other.piiEntityTypes) return false
        if (preferredLanguage != other.preferredLanguage) return false
        if (showSpeakerLabel != other.showSpeakerLabel) return false
        if (vocabularyFilterMethod != other.vocabularyFilterMethod) return false
        if (vocabularyFilterName != other.vocabularyFilterName) return false
        if (vocabularyFilterNames != other.vocabularyFilterNames) return false
        if (vocabularyName != other.vocabularyName) return false
        if (vocabularyNames != other.vocabularyNames) return false

        return true
    }

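    /**
     * Returns a copy of this configuration with any overrides applied through [block] to a [Builder] seeded from this instance.
     *
     * A brief sketch, reusing the assumed `ContentType.Pii` entry name from the class-level example:
     *
     * ```kotlin
     * val redacted = transcribeConfig.copy { contentRedactionType = ContentType.Pii }
     * ```
     */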
    public inline fun copy(block: Builder.() -> kotlin.Unit = {}): aws.sdk.kotlin.services.chimesdkmediapipelines.model.AmazonTranscribeProcessorConfiguration = Builder(this).apply(block).build()

    @SdkDsl
    public class Builder {
        /**
         * Labels all personally identifiable information (PII) identified in your transcript.
         *
         * Content identification is performed at the segment level; PII specified in `PiiEntityTypes` is flagged upon complete transcription of an audio segment.
         *
         * You can’t set `ContentIdentificationType` and `ContentRedactionType` in the same request. If you set both, your request returns a `BadRequestException`.
         *
         * For more information, see [Redacting or identifying personally identifiable information](https://docs.aws.amazon.com/transcribe/latest/dg/pii-redaction.html) in the *Amazon Transcribe Developer Guide*.
         */
        public var contentIdentificationType: aws.sdk.kotlin.services.chimesdkmediapipelines.model.ContentType? = null
        /**
         * Redacts all personally identifiable information (PII) identified in your transcript.
         *
         * Content redaction is performed at the segment level; PII specified in `PiiEntityTypes` is redacted upon complete transcription of an audio segment.
         *
         * You can’t set `ContentRedactionType` and `ContentIdentificationType` in the same request. If you set both, your request returns a `BadRequestException`.
         *
         * For more information, see [Redacting or identifying personally identifiable information](https://docs.aws.amazon.com/transcribe/latest/dg/pii-redaction.html) in the *Amazon Transcribe Developer Guide*.
         */
        public var contentRedactionType: aws.sdk.kotlin.services.chimesdkmediapipelines.model.ContentType? = null
        /**
         * Enables partial result stabilization for your transcription. Partial result stabilization can reduce latency in your output, but may impact accuracy.
         *
         * For more information, see [Partial-result stabilization](https://docs.aws.amazon.com/transcribe/latest/dg/streaming.html#streaming-partial-result-stabilization) in the *Amazon Transcribe Developer Guide*.
         */
        public var enablePartialResultsStabilization: kotlin.Boolean = false
        /**
         * If true, `TranscriptEvents` with `IsPartial: true` are filtered out of the insights target.
         */
        public var filterPartialResults: kotlin.Boolean = false
        /**
         * Turns language identification on or off.
         */
        public var identifyLanguage: kotlin.Boolean = false
        /**
         * Turns language identification on or off for multiple languages.
         *
         * Calls to this API must include a `LanguageCode`, `IdentifyLanguage`, or `IdentifyMultipleLanguages` parameter. If you include more than one of those parameters, your transcription job fails.
         */
        public var identifyMultipleLanguages: kotlin.Boolean = false
        /**
         * The language code that represents the language spoken in your audio.
         *
         * If you're unsure of the language spoken in your audio, consider using `IdentifyLanguage` to enable automatic language identification.
         *
         * For a list of languages that real-time Call Analytics supports, see the [Supported languages table](https://docs.aws.amazon.com/transcribe/latest/dg/supported-languages.html) in the *Amazon Transcribe Developer Guide*.
         */
        public var languageCode: aws.sdk.kotlin.services.chimesdkmediapipelines.model.CallAnalyticsLanguageCode? = null
        /**
         * The name of the custom language model that you want to use when processing your transcription. Note that language model names are case sensitive.
         *
         * The language of the specified language model must match the language code you specify in your transcription request. If the languages don't match, the custom language model isn't applied. There are no errors or warnings associated with a language mismatch.
         *
         * For more information, see [Custom language models](https://docs.aws.amazon.com/transcribe/latest/dg/custom-language-models.html) in the *Amazon Transcribe Developer Guide*.
         */
        public var languageModelName: kotlin.String? = null
        /**
         * The language options for the transcription, such as automatic language detection.
         */
        public var languageOptions: kotlin.String? = null
        /**
         * The level of stability to use when you enable partial results stabilization (`EnablePartialResultsStabilization`).
         *
         * Low stability provides the highest accuracy. High stability transcribes faster, but with slightly lower accuracy.
         *
         * For more information, see [Partial-result stabilization](https://docs.aws.amazon.com/transcribe/latest/dg/streaming.html#streaming-partial-result-stabilization) in the *Amazon Transcribe Developer Guide*.
         */
        public var partialResultsStability: aws.sdk.kotlin.services.chimesdkmediapipelines.model.PartialResultsStability? = null
        /**
         * The types of personally identifiable information (PII) to redact from a transcript. You can include as many types as you'd like, or you can select `ALL`.
         *
         * To include `PiiEntityTypes` in your Call Analytics request, you must also include `ContentIdentificationType` or `ContentRedactionType`, but you can't include both.
         *
         * Values must be comma-separated and can include: `ADDRESS`, `BANK_ACCOUNT_NUMBER`, `BANK_ROUTING`, `CREDIT_DEBIT_CVV`, `CREDIT_DEBIT_EXPIRY`, `CREDIT_DEBIT_NUMBER`, `EMAIL`, `NAME`, `PHONE`, `PIN`, `SSN`, or `ALL`.
         *
         * If you leave this parameter empty, the default behavior is equivalent to `ALL`.
         */
        public var piiEntityTypes: kotlin.String? = null
        /**
         * The preferred language for the transcription.
         */
        public var preferredLanguage: aws.sdk.kotlin.services.chimesdkmediapipelines.model.CallAnalyticsLanguageCode? = null
        /**
         * Enables speaker partitioning (diarization) in your transcription output. Speaker partitioning labels the speech from individual speakers in your media file.
         *
         * For more information, see [Partitioning speakers (diarization)](https://docs.aws.amazon.com/transcribe/latest/dg/diarization.html) in the *Amazon Transcribe Developer Guide*.
         */
        public var showSpeakerLabel: kotlin.Boolean = false
        /**
         * The vocabulary filtering method used in your Call Analytics transcription.
         */
        public var vocabularyFilterMethod: aws.sdk.kotlin.services.chimesdkmediapipelines.model.VocabularyFilterMethod? = null
        /**
         * The name of the custom vocabulary filter that you specified in your Call Analytics request.
         *
         * Length Constraints: Minimum length of 1. Maximum length of 200.
         */
        public var vocabularyFilterName: kotlin.String? = null
        /**
         * The names of the custom vocabulary filter or filters used during transcription.
         */
        public var vocabularyFilterNames: kotlin.String? = null
        /**
         * The name of the custom vocabulary that you specified in your Call Analytics request.
         *
         * Length Constraints: Minimum length of 1. Maximum length of 200.
         */
        public var vocabularyName: kotlin.String? = null
        /**
         * The names of the custom vocabulary or vocabularies used during transcription.
         */
        public var vocabularyNames: kotlin.String? = null

        @PublishedApi
        internal constructor()
        @PublishedApi
        internal constructor(x: aws.sdk.kotlin.services.chimesdkmediapipelines.model.AmazonTranscribeProcessorConfiguration) : this() {
            this.contentIdentificationType = x.contentIdentificationType
            this.contentRedactionType = x.contentRedactionType
            this.enablePartialResultsStabilization = x.enablePartialResultsStabilization
            this.filterPartialResults = x.filterPartialResults
            this.identifyLanguage = x.identifyLanguage
            this.identifyMultipleLanguages = x.identifyMultipleLanguages
            this.languageCode = x.languageCode
            this.languageModelName = x.languageModelName
            this.languageOptions = x.languageOptions
            this.partialResultsStability = x.partialResultsStability
            this.piiEntityTypes = x.piiEntityTypes
            this.preferredLanguage = x.preferredLanguage
            this.showSpeakerLabel = x.showSpeakerLabel
            this.vocabularyFilterMethod = x.vocabularyFilterMethod
            this.vocabularyFilterName = x.vocabularyFilterName
            this.vocabularyFilterNames = x.vocabularyFilterNames
            this.vocabularyName = x.vocabularyName
            this.vocabularyNames = x.vocabularyNames
        }

        @PublishedApi
        internal fun build(): aws.sdk.kotlin.services.chimesdkmediapipelines.model.AmazonTranscribeProcessorConfiguration = AmazonTranscribeProcessorConfiguration(this)

        internal fun correctErrors(): Builder {
            return this
        }
    }
}
