// Code generated by smithy-kotlin-codegen. DO NOT EDIT!

package aws.sdk.kotlin.services.transcribe.model



public class StartTranscriptionJobRequest private constructor(builder: Builder) {
    /**
     * Makes it possible to redact or flag specified personally identifiable information (PII) in your transcript. If you use `ContentRedaction`, you must also include the sub-parameters: `RedactionOutput` and `RedactionType`. You can optionally include `PiiEntityTypes` to choose which types of PII you want to redact. If you do not include `PiiEntityTypes` in your request, all PII is redacted.
     */
    public val contentRedaction: aws.sdk.kotlin.services.transcribe.model.ContentRedaction? = builder.contentRedaction
    /**
     * Enables automatic language identification in your transcription job request. Use this parameter if your media file contains only one language. If your media contains multiple languages, use `IdentifyMultipleLanguages` instead.
     *
     * If you include `IdentifyLanguage`, you can optionally include a list of language codes, using `LanguageOptions`, that you think may be present in your media file. Including `LanguageOptions` restricts `IdentifyLanguage` to only the language options that you specify, which can improve transcription accuracy.
     *
     * If you want to apply a custom language model, a custom vocabulary, or a custom vocabulary filter to your automatic language identification request, include `LanguageIdSettings` with the relevant sub-parameters (`VocabularyName`, `LanguageModelName`, and `VocabularyFilterName`). If you include `LanguageIdSettings`, also include `LanguageOptions`.
     *
     * Note that you must include one of `LanguageCode`, `IdentifyLanguage`, or `IdentifyMultipleLanguages` in your request. If you include more than one of these parameters, your transcription job fails.
     */
    public val identifyLanguage: kotlin.Boolean? = builder.identifyLanguage
    /**
     * Enables automatic multi-language identification in your transcription job request. Use this parameter if your media file contains more than one language. If your media contains only one language, use `IdentifyLanguage` instead.
     *
     * If you include `IdentifyMultipleLanguages`, you can optionally include a list of language codes, using `LanguageOptions`, that you think may be present in your media file. Including `LanguageOptions` restricts `IdentifyMultipleLanguages` to only the language options that you specify, which can improve transcription accuracy.
     *
     * If you want to apply a custom vocabulary or a custom vocabulary filter to your automatic language identification request, include `LanguageIdSettings` with the relevant sub-parameters (`VocabularyName` and `VocabularyFilterName`). If you include `LanguageIdSettings`, also include `LanguageOptions`.
     *
     * Note that you must include one of `LanguageCode`, `IdentifyLanguage`, or `IdentifyMultipleLanguages` in your request. If you include more than one of these parameters, your transcription job fails.
     */
    public val identifyMultipleLanguages: kotlin.Boolean? = builder.identifyMultipleLanguages
    /**
     * Makes it possible to control how your transcription job is processed. Currently, the only `JobExecutionSettings` modification you can choose is enabling job queueing using the `AllowDeferredExecution` sub-parameter.
     *
     * If you include `JobExecutionSettings` in your request, you must also include the sub-parameters: `AllowDeferredExecution` and `DataAccessRoleArn`.
     */
    public val jobExecutionSettings: aws.sdk.kotlin.services.transcribe.model.JobExecutionSettings? = builder.jobExecutionSettings
    /**
     * A map of plain text, non-secret key:value pairs, known as encryption context pairs, that provide an added layer of security for your data. For more information, see [KMS encryption context](https://docs.aws.amazon.com/transcribe/latest/dg/key-management.html#kms-context) and [Asymmetric keys in KMS](https://docs.aws.amazon.com/transcribe/latest/dg/symmetric-asymmetric.html).
     */
    public val kmsEncryptionContext: Map<String, String>? = builder.kmsEncryptionContext
    /**
     * The language code that represents the language spoken in the input media file.
     *
     * If you're unsure of the language spoken in your media file, consider using `IdentifyLanguage` or `IdentifyMultipleLanguages` to enable automatic language identification.
     *
     * Note that you must include one of `LanguageCode`, `IdentifyLanguage`, or `IdentifyMultipleLanguages` in your request. If you include more than one of these parameters, your transcription job fails.
     *
     * For a list of supported languages and their associated language codes, refer to the [Supported languages](https://docs.aws.amazon.com/transcribe/latest/dg/supported-languages.html) table.
     *
     * To transcribe speech in Modern Standard Arabic (`ar-SA`), your media file must be encoded at a sample rate of 16,000 Hz or higher.
     */
    public val languageCode: aws.sdk.kotlin.services.transcribe.model.LanguageCode? = builder.languageCode
    /**
     * If using automatic language identification in your request and you want to apply a custom language model, a custom vocabulary, or a custom vocabulary filter, include `LanguageIdSettings` with the relevant sub-parameters (`VocabularyName`, `LanguageModelName`, and `VocabularyFilterName`). Note that multi-language identification (`IdentifyMultipleLanguages`) doesn't support custom language models.
     *
     * `LanguageIdSettings` supports two to five language codes. Each language code you include can have an associated custom language model, custom vocabulary, and custom vocabulary filter. The language codes that you specify must match the languages of the associated custom language models, custom vocabularies, and custom vocabulary filters.
     *
     * It's recommended that you include `LanguageOptions` when using `LanguageIdSettings` to ensure that the correct language dialect is identified. For example, if you specify a custom vocabulary that is in `en-US` but Amazon Transcribe determines that the language spoken in your media is `en-AU`, your custom vocabulary *is not* applied to your transcription. If you include `LanguageOptions` and include `en-US` as the only English language dialect, your custom vocabulary *is* applied to your transcription.
     *
     * If you want to include a custom language model with your request but **do not** want to use automatic language identification, use the `ModelSettings` parameter with the `LanguageModelName` sub-parameter instead. If you want to include a custom vocabulary or a custom vocabulary filter (or both) with your request but **do not** want to use automatic language identification, use the `Settings` parameter with the `VocabularyName` or `VocabularyFilterName` (or both) sub-parameter instead.
     */
    public val languageIdSettings: Map<aws.sdk.kotlin.services.transcribe.model.LanguageCode, aws.sdk.kotlin.services.transcribe.model.LanguageIdSettings>? = builder.languageIdSettings
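    // Illustrative sketch (not part of the generated model): one way to populate this map together
    // with `LanguageOptions` when using automatic language identification. The vocabulary names are
    // hypothetical, and the enum entry spellings (e.g. LanguageCode.EnUs) assume smithy-kotlin's
    // usual naming.
    //
    //     languageOptions = listOf(LanguageCode.EnUs, LanguageCode.EnAu)
    //     languageIdSettings = mapOf(
    //         LanguageCode.EnUs to LanguageIdSettings { vocabularyName = "my-en-us-vocabulary" },
    //         LanguageCode.EnAu to LanguageIdSettings { vocabularyName = "my-en-au-vocabulary" },
    //     )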
    /**
     * You can specify two or more language codes that represent the languages you think may be present in your media. Including more than five is not recommended. If you're unsure what languages are present, do not include this parameter.
     *
     * If you include `LanguageOptions` in your request, you must also include `IdentifyLanguage`.
     *
     * For more information, refer to [Supported languages](https://docs.aws.amazon.com/transcribe/latest/dg/supported-languages.html).
     *
     * To transcribe speech in Modern Standard Arabic (`ar-SA`), your media file must be encoded at a sample rate of 16,000 Hz or higher.
     */
    public val languageOptions: List<aws.sdk.kotlin.services.transcribe.model.LanguageCode>? = builder.languageOptions
    /**
     * Describes the Amazon S3 location of the media file you want to use in your request.
     */
    public val media: aws.sdk.kotlin.services.transcribe.model.Media? = builder.media
    /**
     * Specify the format of your input media file.
     */
    public val mediaFormat: aws.sdk.kotlin.services.transcribe.model.MediaFormat? = builder.mediaFormat
    /**
     * The sample rate, in hertz, of the audio track in your input media file.
     *
     * If you do not specify the media sample rate, Amazon Transcribe determines it for you. If you specify the sample rate, it must match the rate detected by Amazon Transcribe. If there's a mismatch between the value that you specify and the value detected, your job fails. In most cases, you can omit `MediaSampleRateHertz` and let Amazon Transcribe determine the sample rate.
     */
    public val mediaSampleRateHertz: kotlin.Int? = builder.mediaSampleRateHertz
    /**
     * Specify the custom language model you want to include with your transcription job. If you include `ModelSettings` in your request, you must include the `LanguageModelName` sub-parameter.
     *
     * For more information, see [Custom language models](https://docs.aws.amazon.com/transcribe/latest/dg/custom-language-models.html).
     */
    public val modelSettings: aws.sdk.kotlin.services.transcribe.model.ModelSettings? = builder.modelSettings
    /**
     * The name of the Amazon S3 bucket where you want your transcription output stored. Do not include the `S3://` prefix of the specified bucket.
     *
     * If you want your output to go to a sub-folder of this bucket, specify it using the `OutputKey` parameter; `OutputBucketName` only accepts the name of a bucket.
     *
     * For example, if you want your output stored in `S3://DOC-EXAMPLE-BUCKET`, set `OutputBucketName` to `DOC-EXAMPLE-BUCKET`. However, if you want your output stored in `S3://DOC-EXAMPLE-BUCKET/test-files/`, set `OutputBucketName` to `DOC-EXAMPLE-BUCKET` and `OutputKey` to `test-files/`.
     *
     * Note that Amazon Transcribe must have permission to use the specified location. You can change Amazon S3 permissions using the [Amazon Web Services Management Console](https://console.aws.amazon.com/s3). See also [Permissions Required for IAM User Roles](https://docs.aws.amazon.com/transcribe/latest/dg/security_iam_id-based-policy-examples.html#auth-role-iam-user).
     *
     * If you do not specify `OutputBucketName`, your transcript is placed in a service-managed Amazon S3 bucket and you are provided with a URI to access your transcript.
     */
    public val outputBucketName: kotlin.String? = builder.outputBucketName
    /**
     * The KMS key you want to use to encrypt your transcription output.
     *
     * If using a key located in the **current** Amazon Web Services account, you can specify your KMS key in one of four ways:
     * + Use the KMS key ID itself. For example, `1234abcd-12ab-34cd-56ef-1234567890ab`.
     * + Use an alias for the KMS key ID. For example, `alias/ExampleAlias`.
     * + Use the Amazon Resource Name (ARN) for the KMS key ID. For example, `arn:aws:kms:region:account-ID:key/1234abcd-12ab-34cd-56ef-1234567890ab`.
     * + Use the ARN for the KMS key alias. For example, `arn:aws:kms:region:account-ID:alias/ExampleAlias`.
     *
     * If using a key located in a **different** Amazon Web Services account than the current Amazon Web Services account, you can specify your KMS key in one of two ways:
     * + Use the ARN for the KMS key ID. For example, `arn:aws:kms:region:account-ID:key/1234abcd-12ab-34cd-56ef-1234567890ab`.
     * + Use the ARN for the KMS key alias. For example, `arn:aws:kms:region:account-ID:alias/ExampleAlias`.
     *
     * If you do not specify an encryption key, your output is encrypted with the default Amazon S3 key (SSE-S3).
     *
     * If you specify a KMS key to encrypt your output, you must also specify an output location using the `OutputLocation` parameter.
     *
     * Note that the role making the request must have permission to use the specified KMS key.
     */
    public val outputEncryptionKmsKeyId: kotlin.String? = builder.outputEncryptionKmsKeyId
    /**
     * Use in combination with `OutputBucketName` to specify the output location of your transcript and, optionally, a unique name for your output file. The default name for your transcription output is the same as the name you specified for your transcription job (`TranscriptionJobName`).
     *
     * Here are some examples of how you can use `OutputKey`:
     * + If you specify 'DOC-EXAMPLE-BUCKET' as the `OutputBucketName` and 'my-transcript.json' as the `OutputKey`, your transcription output path is `s3://DOC-EXAMPLE-BUCKET/my-transcript.json`.
     * + If you specify 'my-first-transcription' as the `TranscriptionJobName`, 'DOC-EXAMPLE-BUCKET' as the `OutputBucketName`, and 'my-transcript' as the `OutputKey`, your transcription output path is `s3://DOC-EXAMPLE-BUCKET/my-transcript/my-first-transcription.json`.
     * + If you specify 'DOC-EXAMPLE-BUCKET' as the `OutputBucketName` and 'test-files/my-transcript.json' as the `OutputKey`, your transcription output path is `s3://DOC-EXAMPLE-BUCKET/test-files/my-transcript.json`.
     * + If you specify 'my-first-transcription' as the `TranscriptionJobName`, 'DOC-EXAMPLE-BUCKET' as the `OutputBucketName`, and 'test-files/my-transcript' as the `OutputKey`, your transcription output path is `s3://DOC-EXAMPLE-BUCKET/test-files/my-transcript/my-first-transcription.json`.
     *
     * If you specify the name of an Amazon S3 bucket sub-folder that doesn't exist, one is created for you.
     */
    public val outputKey: kotlin.String? = builder.outputKey
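    // Illustrative sketch (not part of the generated model): how `OutputBucketName` and `OutputKey`
    // combine inside the request builder, using the documentation's example bucket name.
    //
    //     outputBucketName = "DOC-EXAMPLE-BUCKET"
    //     outputKey = "test-files/my-transcript.json"
    //     // transcript is written to s3://DOC-EXAMPLE-BUCKET/test-files/my-transcript.json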
    /**
     * Specify additional optional settings in your request, including channel identification, alternative transcriptions, and speaker partitioning. You can use these settings to apply custom vocabularies and vocabulary filters.
     *
     * If you want to include a custom vocabulary or a custom vocabulary filter (or both) with your request but **do not** want to use automatic language identification, use `Settings` with the `VocabularyName` or `VocabularyFilterName` (or both) sub-parameter.
     *
     * If you're using automatic language identification with your request and want to include a custom language model, a custom vocabulary, or a custom vocabulary filter, use the `LanguageIdSettings` parameter with the `LanguageModelName`, `VocabularyName`, or `VocabularyFilterName` sub-parameters instead.
     */
    public val settings: aws.sdk.kotlin.services.transcribe.model.Settings? = builder.settings
    /**
     * Produces subtitle files for your input media. You can specify WebVTT (*.vtt) and SubRip (*.srt) formats.
     */
    public val subtitles: aws.sdk.kotlin.services.transcribe.model.Subtitles? = builder.subtitles
    /**
     * Adds one or more custom tags, each in the form of a key:value pair, to a new transcription job at the time you start this new job.
     *
     * To learn more about using tags with Amazon Transcribe, refer to [Tagging resources](https://docs.aws.amazon.com/transcribe/latest/dg/tagging.html).
     */
    public val tags: List<aws.sdk.kotlin.services.transcribe.model.Tag>? = builder.tags
    /**
     * Enables toxic speech detection in your transcript. If you include `ToxicityDetection` in your request, you must also include `ToxicityCategories`.
     *
     * For information on the types of toxic speech Amazon Transcribe can detect, see [Detecting toxic speech](https://docs.aws.amazon.com/transcribe/latest/dg/toxic-language.html).
     */
    public val toxicityDetection: List<aws.sdk.kotlin.services.transcribe.model.ToxicityDetectionSettings>? = builder.toxicityDetection
    /**
     * A unique name, chosen by you, for your transcription job. The name that you specify is also used as the default name of your transcription output file. If you want to specify a different name for your transcription output, use the `OutputKey` parameter.
     *
     * This name is case sensitive, cannot contain spaces, and must be unique within an Amazon Web Services account. If you try to create a new job with the same name as an existing job, you get a `ConflictException` error.
     */
    public val transcriptionJobName: kotlin.String? = builder.transcriptionJobName

    public companion object {
        public operator fun invoke(block: Builder.() -> kotlin.Unit): aws.sdk.kotlin.services.transcribe.model.StartTranscriptionJobRequest = Builder().apply(block).build()
    }
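    // Illustrative sketch (not part of the generated model): constructing a request with the
    // DSL-style invoke operator above. The job name, bucket, and media URI are hypothetical
    // placeholders, and LanguageCode.EnUs assumes smithy-kotlin's usual enum naming.
    //
    //     val request = StartTranscriptionJobRequest {
    //         transcriptionJobName = "my-first-transcription"
    //         languageCode = LanguageCode.EnUs        // or set identifyLanguage = true instead
    //         media { mediaFileUri = "s3://DOC-EXAMPLE-BUCKET/my-media.flac" }
    //         outputBucketName = "DOC-EXAMPLE-BUCKET"
    //         outputKey = "my-transcript.json"
    //     }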

    override fun toString(): kotlin.String = buildString {
        append("StartTranscriptionJobRequest(")
        append("contentRedaction=$contentRedaction,")
        append("identifyLanguage=$identifyLanguage,")
        append("identifyMultipleLanguages=$identifyMultipleLanguages,")
        append("jobExecutionSettings=$jobExecutionSettings,")
        append("kmsEncryptionContext=$kmsEncryptionContext,")
        append("languageCode=$languageCode,")
        append("languageIdSettings=$languageIdSettings,")
        append("languageOptions=$languageOptions,")
        append("media=$media,")
        append("mediaFormat=$mediaFormat,")
        append("mediaSampleRateHertz=$mediaSampleRateHertz,")
        append("modelSettings=$modelSettings,")
        append("outputBucketName=$outputBucketName,")
        append("outputEncryptionKmsKeyId=$outputEncryptionKmsKeyId,")
        append("outputKey=$outputKey,")
        append("settings=$settings,")
        append("subtitles=$subtitles,")
        append("tags=$tags,")
        append("toxicityDetection=$toxicityDetection,")
        append("transcriptionJobName=$transcriptionJobName")
        append(")")
    }

    override fun hashCode(): kotlin.Int {
        var result = contentRedaction?.hashCode() ?: 0
        result = 31 * result + (identifyLanguage?.hashCode() ?: 0)
        result = 31 * result + (identifyMultipleLanguages?.hashCode() ?: 0)
        result = 31 * result + (jobExecutionSettings?.hashCode() ?: 0)
        result = 31 * result + (kmsEncryptionContext?.hashCode() ?: 0)
        result = 31 * result + (languageCode?.hashCode() ?: 0)
        result = 31 * result + (languageIdSettings?.hashCode() ?: 0)
        result = 31 * result + (languageOptions?.hashCode() ?: 0)
        result = 31 * result + (media?.hashCode() ?: 0)
        result = 31 * result + (mediaFormat?.hashCode() ?: 0)
        result = 31 * result + (mediaSampleRateHertz ?: 0)
        result = 31 * result + (modelSettings?.hashCode() ?: 0)
        result = 31 * result + (outputBucketName?.hashCode() ?: 0)
        result = 31 * result + (outputEncryptionKmsKeyId?.hashCode() ?: 0)
        result = 31 * result + (outputKey?.hashCode() ?: 0)
        result = 31 * result + (settings?.hashCode() ?: 0)
        result = 31 * result + (subtitles?.hashCode() ?: 0)
        result = 31 * result + (tags?.hashCode() ?: 0)
        result = 31 * result + (toxicityDetection?.hashCode() ?: 0)
        result = 31 * result + (transcriptionJobName?.hashCode() ?: 0)
        return result
    }

    override fun equals(other: kotlin.Any?): kotlin.Boolean {
        if (this === other) return true
        if (other == null || this::class != other::class) return false

        other as StartTranscriptionJobRequest

        if (contentRedaction != other.contentRedaction) return false
        if (identifyLanguage != other.identifyLanguage) return false
        if (identifyMultipleLanguages != other.identifyMultipleLanguages) return false
        if (jobExecutionSettings != other.jobExecutionSettings) return false
        if (kmsEncryptionContext != other.kmsEncryptionContext) return false
        if (languageCode != other.languageCode) return false
        if (languageIdSettings != other.languageIdSettings) return false
        if (languageOptions != other.languageOptions) return false
        if (media != other.media) return false
        if (mediaFormat != other.mediaFormat) return false
        if (mediaSampleRateHertz != other.mediaSampleRateHertz) return false
        if (modelSettings != other.modelSettings) return false
        if (outputBucketName != other.outputBucketName) return false
        if (outputEncryptionKmsKeyId != other.outputEncryptionKmsKeyId) return false
        if (outputKey != other.outputKey) return false
        if (settings != other.settings) return false
        if (subtitles != other.subtitles) return false
        if (tags != other.tags) return false
        if (toxicityDetection != other.toxicityDetection) return false
        if (transcriptionJobName != other.transcriptionJobName) return false

        return true
    }

    public inline fun copy(block: Builder.() -> kotlin.Unit = {}): aws.sdk.kotlin.services.transcribe.model.StartTranscriptionJobRequest = Builder(this).apply(block).build()
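    // Illustrative sketch (not part of the generated model): copy() builds a modified copy while
    // leaving the original request unchanged, e.g. to resubmit the same job under a new name.
    //
    //     val retry = request.copy { transcriptionJobName = "my-first-transcription-retry" }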

    public class Builder {
        /**
         * Makes it possible to redact or flag specified personally identifiable information (PII) in your transcript. If you use `ContentRedaction`, you must also include the sub-parameters: `RedactionOutput` and `RedactionType`. You can optionally include `PiiEntityTypes` to choose which types of PII you want to redact. If you do not include `PiiEntityTypes` in your request, all PII is redacted.
         */
        public var contentRedaction: aws.sdk.kotlin.services.transcribe.model.ContentRedaction? = null
        /**
         * Enables automatic language identification in your transcription job request. Use this parameter if your media file contains only one language. If your media contains multiple languages, use `IdentifyMultipleLanguages` instead.
         *
         * If you include `IdentifyLanguage`, you can optionally include a list of language codes, using `LanguageOptions`, that you think may be present in your media file. Including `LanguageOptions` restricts `IdentifyLanguage` to only the language options that you specify, which can improve transcription accuracy.
         *
         * If you want to apply a custom language model, a custom vocabulary, or a custom vocabulary filter to your automatic language identification request, include `LanguageIdSettings` with the relevant sub-parameters (`VocabularyName`, `LanguageModelName`, and `VocabularyFilterName`). If you include `LanguageIdSettings`, also include `LanguageOptions`.
         *
         * Note that you must include one of `LanguageCode`, `IdentifyLanguage`, or `IdentifyMultipleLanguages` in your request. If you include more than one of these parameters, your transcription job fails.
         */
        public var identifyLanguage: kotlin.Boolean? = null
        /**
         * Enables automatic multi-language identification in your transcription job request. Use this parameter if your media file contains more than one language. If your media contains only one language, use `IdentifyLanguage` instead.
         *
         * If you include `IdentifyMultipleLanguages`, you can optionally include a list of language codes, using `LanguageOptions`, that you think may be present in your media file. Including `LanguageOptions` restricts `IdentifyMultipleLanguages` to only the language options that you specify, which can improve transcription accuracy.
         *
         * If you want to apply a custom vocabulary or a custom vocabulary filter to your automatic language identification request, include `LanguageIdSettings` with the relevant sub-parameters (`VocabularyName` and `VocabularyFilterName`). If you include `LanguageIdSettings`, also include `LanguageOptions`.
         *
         * Note that you must include one of `LanguageCode`, `IdentifyLanguage`, or `IdentifyMultipleLanguages` in your request. If you include more than one of these parameters, your transcription job fails.
         */
        public var identifyMultipleLanguages: kotlin.Boolean? = null
        /**
         * Makes it possible to control how your transcription job is processed. Currently, the only `JobExecutionSettings` modification you can choose is enabling job queueing using the `AllowDeferredExecution` sub-parameter.
         *
         * If you include `JobExecutionSettings` in your request, you must also include the sub-parameters: `AllowDeferredExecution` and `DataAccessRoleArn`.
         */
        public var jobExecutionSettings: aws.sdk.kotlin.services.transcribe.model.JobExecutionSettings? = null
        /**
         * A map of plain text, non-secret key:value pairs, known as encryption context pairs, that provide an added layer of security for your data. For more information, see [KMS encryption context](https://docs.aws.amazon.com/transcribe/latest/dg/key-management.html#kms-context) and [Asymmetric keys in KMS](https://docs.aws.amazon.com/transcribe/latest/dg/symmetric-asymmetric.html).
         */
        public var kmsEncryptionContext: Map<String, String>? = null
        /**
         * The language code that represents the language spoken in the input media file.
         *
         * If you're unsure of the language spoken in your media file, consider using `IdentifyLanguage` or `IdentifyMultipleLanguages` to enable automatic language identification.
         *
         * Note that you must include one of `LanguageCode`, `IdentifyLanguage`, or `IdentifyMultipleLanguages` in your request. If you include more than one of these parameters, your transcription job fails.
         *
         * For a list of supported languages and their associated language codes, refer to the [Supported languages](https://docs.aws.amazon.com/transcribe/latest/dg/supported-languages.html) table.
         *
         * To transcribe speech in Modern Standard Arabic (`ar-SA`), your media file must be encoded at a sample rate of 16,000 Hz or higher.
         */
        public var languageCode: aws.sdk.kotlin.services.transcribe.model.LanguageCode? = null
        /**
         * If using automatic language identification in your request and you want to apply a custom language model, a custom vocabulary, or a custom vocabulary filter, include `LanguageIdSettings` with the relevant sub-parameters (`VocabularyName`, `LanguageModelName`, and `VocabularyFilterName`). Note that multi-language identification (`IdentifyMultipleLanguages`) doesn't support custom language models.
         *
         * `LanguageIdSettings` supports two to five language codes. Each language code you include can have an associated custom language model, custom vocabulary, and custom vocabulary filter. The language codes that you specify must match the languages of the associated custom language models, custom vocabularies, and custom vocabulary filters.
         *
         * It's recommended that you include `LanguageOptions` when using `LanguageIdSettings` to ensure that the correct language dialect is identified. For example, if you specify a custom vocabulary that is in `en-US` but Amazon Transcribe determines that the language spoken in your media is `en-AU`, your custom vocabulary *is not* applied to your transcription. If you include `LanguageOptions` and include `en-US` as the only English language dialect, your custom vocabulary *is* applied to your transcription.
         *
         * If you want to include a custom language model with your request but **do not** want to use automatic language identification, use the `ModelSettings` parameter with the `LanguageModelName` sub-parameter instead. If you want to include a custom vocabulary or a custom vocabulary filter (or both) with your request but **do not** want to use automatic language identification, use the `Settings` parameter with the `VocabularyName` or `VocabularyFilterName` (or both) sub-parameter instead.
         */
        public var languageIdSettings: Map<aws.sdk.kotlin.services.transcribe.model.LanguageCode, aws.sdk.kotlin.services.transcribe.model.LanguageIdSettings>? = null
        /**
         * You can specify two or more language codes that represent the languages you think may be present in your media. Including more than five is not recommended. If you're unsure what languages are present, do not include this parameter.
         *
         * If you include `LanguageOptions` in your request, you must also include `IdentifyLanguage`.
         *
         * For more information, refer to [Supported languages](https://docs.aws.amazon.com/transcribe/latest/dg/supported-languages.html).
         *
         * To transcribe speech in Modern Standard Arabic (`ar-SA`), your media file must be encoded at a sample rate of 16,000 Hz or higher.
         */
        public var languageOptions: List<aws.sdk.kotlin.services.transcribe.model.LanguageCode>? = null
        /**
         * Describes the Amazon S3 location of the media file you want to use in your request.
         */
        public var media: aws.sdk.kotlin.services.transcribe.model.Media? = null
        /**
         * Specify the format of your input media file.
         */
        public var mediaFormat: aws.sdk.kotlin.services.transcribe.model.MediaFormat? = null
        /**
         * The sample rate, in hertz, of the audio track in your input media file.
         *
         * If you do not specify the media sample rate, Amazon Transcribe determines it for you. If you specify the sample rate, it must match the rate detected by Amazon Transcribe. If there's a mismatch between the value that you specify and the value detected, your job fails. In most cases, you can omit `MediaSampleRateHertz` and let Amazon Transcribe determine the sample rate.
         */
        public var mediaSampleRateHertz: kotlin.Int? = null
        /**
         * Specify the custom language model you want to include with your transcription job. If you include `ModelSettings` in your request, you must include the `LanguageModelName` sub-parameter.
         *
         * For more information, see [Custom language models](https://docs.aws.amazon.com/transcribe/latest/dg/custom-language-models.html).
         */
        public var modelSettings: aws.sdk.kotlin.services.transcribe.model.ModelSettings? = null
        /**
         * The name of the Amazon S3 bucket where you want your transcription output stored. Do not include the `S3://` prefix of the specified bucket.
         *
         * If you want your output to go to a sub-folder of this bucket, specify it using the `OutputKey` parameter; `OutputBucketName` only accepts the name of a bucket.
         *
         * For example, if you want your output stored in `S3://DOC-EXAMPLE-BUCKET`, set `OutputBucketName` to `DOC-EXAMPLE-BUCKET`. However, if you want your output stored in `S3://DOC-EXAMPLE-BUCKET/test-files/`, set `OutputBucketName` to `DOC-EXAMPLE-BUCKET` and `OutputKey` to `test-files/`.
         *
         * Note that Amazon Transcribe must have permission to use the specified location. You can change Amazon S3 permissions using the [Amazon Web Services Management Console](https://console.aws.amazon.com/s3). See also [Permissions Required for IAM User Roles](https://docs.aws.amazon.com/transcribe/latest/dg/security_iam_id-based-policy-examples.html#auth-role-iam-user).
         *
         * If you do not specify `OutputBucketName`, your transcript is placed in a service-managed Amazon S3 bucket and you are provided with a URI to access your transcript.
         */
        public var outputBucketName: kotlin.String? = null
        /**
         * The KMS key you want to use to encrypt your transcription output.
         *
         * If using a key located in the **current** Amazon Web Services account, you can specify your KMS key in one of four ways:
         * + Use the KMS key ID itself. For example, `1234abcd-12ab-34cd-56ef-1234567890ab`.
         * + Use an alias for the KMS key ID. For example, `alias/ExampleAlias`.
         * + Use the Amazon Resource Name (ARN) for the KMS key ID. For example, `arn:aws:kms:region:account-ID:key/1234abcd-12ab-34cd-56ef-1234567890ab`.
         * + Use the ARN for the KMS key alias. For example, `arn:aws:kms:region:account-ID:alias/ExampleAlias`.
         *
         * If using a key located in a **different** Amazon Web Services account than the current Amazon Web Services account, you can specify your KMS key in one of two ways:
         * + Use the ARN for the KMS key ID. For example, `arn:aws:kms:region:account-ID:key/1234abcd-12ab-34cd-56ef-1234567890ab`.
         * + Use the ARN for the KMS key alias. For example, `arn:aws:kms:region:account-ID:alias/ExampleAlias`.
         *
         * If you do not specify an encryption key, your output is encrypted with the default Amazon S3 key (SSE-S3).
         *
         * If you specify a KMS key to encrypt your output, you must also specify an output location using the `OutputLocation` parameter.
         *
         * Note that the role making the request must have permission to use the specified KMS key.
         */
        public var outputEncryptionKmsKeyId: kotlin.String? = null
        /**
         * Use in combination with `OutputBucketName` to specify the output location of your transcript and, optionally, a unique name for your output file. The default name for your transcription output is the same as the name you specified for your transcription job (`TranscriptionJobName`).
         *
         * Here are some examples of how you can use `OutputKey`:
         * + If you specify 'DOC-EXAMPLE-BUCKET' as the `OutputBucketName` and 'my-transcript.json' as the `OutputKey`, your transcription output path is `s3://DOC-EXAMPLE-BUCKET/my-transcript.json`.
         * + If you specify 'my-first-transcription' as the `TranscriptionJobName`, 'DOC-EXAMPLE-BUCKET' as the `OutputBucketName`, and 'my-transcript' as the `OutputKey`, your transcription output path is `s3://DOC-EXAMPLE-BUCKET/my-transcript/my-first-transcription.json`.
         * + If you specify 'DOC-EXAMPLE-BUCKET' as the `OutputBucketName` and 'test-files/my-transcript.json' as the `OutputKey`, your transcription output path is `s3://DOC-EXAMPLE-BUCKET/test-files/my-transcript.json`.
         * + If you specify 'my-first-transcription' as the `TranscriptionJobName`, 'DOC-EXAMPLE-BUCKET' as the `OutputBucketName`, and 'test-files/my-transcript' as the `OutputKey`, your transcription output path is `s3://DOC-EXAMPLE-BUCKET/test-files/my-transcript/my-first-transcription.json`.
         *
         * If you specify the name of an Amazon S3 bucket sub-folder that doesn't exist, one is created for you.
         */
        public var outputKey: kotlin.String? = null
        /**
         * Specify additional optional settings in your request, including channel identification, alternative transcriptions, and speaker partitioning. You can use these settings to apply custom vocabularies and vocabulary filters.
         *
         * If you want to include a custom vocabulary or a custom vocabulary filter (or both) with your request but **do not** want to use automatic language identification, use `Settings` with the `VocabularyName` or `VocabularyFilterName` (or both) sub-parameter.
         *
         * If you're using automatic language identification with your request and want to include a custom language model, a custom vocabulary, or a custom vocabulary filter, use the `LanguageIdSettings` parameter with the `LanguageModelName`, `VocabularyName`, or `VocabularyFilterName` sub-parameters instead.
         */
        public var settings: aws.sdk.kotlin.services.transcribe.model.Settings? = null
        /**
         * Produces subtitle files for your input media. You can specify WebVTT (*.vtt) and SubRip (*.srt) formats.
         */
        public var subtitles: aws.sdk.kotlin.services.transcribe.model.Subtitles? = null
        /**
         * Adds one or more custom tags, each in the form of a key:value pair, to a new transcription job at the time you start this new job.
         *
         * To learn more about using tags with Amazon Transcribe, refer to [Tagging resources](https://docs.aws.amazon.com/transcribe/latest/dg/tagging.html).
         */
        public var tags: List<aws.sdk.kotlin.services.transcribe.model.Tag>? = null
        /**
         * Enables toxic speech detection in your transcript. If you include `ToxicityDetection` in your request, you must also include `ToxicityCategories`.
         *
         * For information on the types of toxic speech Amazon Transcribe can detect, see [Detecting toxic speech](https://docs.aws.amazon.com/transcribe/latest/dg/toxic-language.html).
         */
        public var toxicityDetection: List<aws.sdk.kotlin.services.transcribe.model.ToxicityDetectionSettings>? = null
        /**
         * A unique name, chosen by you, for your transcription job. The name that you specify is also used as the default name of your transcription output file. If you want to specify a different name for your transcription output, use the `OutputKey` parameter.
         *
         * This name is case sensitive, cannot contain spaces, and must be unique within an Amazon Web Services account. If you try to create a new job with the same name as an existing job, you get a `ConflictException` error.
         */
        public var transcriptionJobName: kotlin.String? = null

        @PublishedApi
        internal constructor()
        @PublishedApi
        internal constructor(x: aws.sdk.kotlin.services.transcribe.model.StartTranscriptionJobRequest) : this() {
            this.contentRedaction = x.contentRedaction
            this.identifyLanguage = x.identifyLanguage
            this.identifyMultipleLanguages = x.identifyMultipleLanguages
            this.jobExecutionSettings = x.jobExecutionSettings
            this.kmsEncryptionContext = x.kmsEncryptionContext
            this.languageCode = x.languageCode
            this.languageIdSettings = x.languageIdSettings
            this.languageOptions = x.languageOptions
            this.media = x.media
            this.mediaFormat = x.mediaFormat
            this.mediaSampleRateHertz = x.mediaSampleRateHertz
            this.modelSettings = x.modelSettings
            this.outputBucketName = x.outputBucketName
            this.outputEncryptionKmsKeyId = x.outputEncryptionKmsKeyId
            this.outputKey = x.outputKey
            this.settings = x.settings
            this.subtitles = x.subtitles
            this.tags = x.tags
            this.toxicityDetection = x.toxicityDetection
            this.transcriptionJobName = x.transcriptionJobName
        }

        @PublishedApi
        internal fun build(): aws.sdk.kotlin.services.transcribe.model.StartTranscriptionJobRequest = StartTranscriptionJobRequest(this)

        /**
         * construct an [aws.sdk.kotlin.services.transcribe.model.ContentRedaction] inside the given [block]
         */
        public fun contentRedaction(block: aws.sdk.kotlin.services.transcribe.model.ContentRedaction.Builder.() -> kotlin.Unit) {
            this.contentRedaction = aws.sdk.kotlin.services.transcribe.model.ContentRedaction.invoke(block)
        }
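        // Illustrative sketch (not part of the generated model): filling in the required
        // ContentRedaction sub-parameters through the convenience function above. The enum entry
        // spellings (RedactionType.Pii, RedactionOutput.Redacted) assume smithy-kotlin's usual naming.
        //
        //     contentRedaction {
        //         redactionType = RedactionType.Pii
        //         redactionOutput = RedactionOutput.Redacted
        //     }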

        /**
         * construct an [aws.sdk.kotlin.services.transcribe.model.JobExecutionSettings] inside the given [block]
         */
        public fun jobExecutionSettings(block: aws.sdk.kotlin.services.transcribe.model.JobExecutionSettings.Builder.() -> kotlin.Unit) {
            this.jobExecutionSettings = aws.sdk.kotlin.services.transcribe.model.JobExecutionSettings.invoke(block)
        }

        /**
         * construct an [aws.sdk.kotlin.services.transcribe.model.Media] inside the given [block]
         */
        public fun media(block: aws.sdk.kotlin.services.transcribe.model.Media.Builder.() -> kotlin.Unit) {
            this.media = aws.sdk.kotlin.services.transcribe.model.Media.invoke(block)
        }

        /**
         * construct an [aws.sdk.kotlin.services.transcribe.model.ModelSettings] inside the given [block]
         */
        public fun modelSettings(block: aws.sdk.kotlin.services.transcribe.model.ModelSettings.Builder.() -> kotlin.Unit) {
            this.modelSettings = aws.sdk.kotlin.services.transcribe.model.ModelSettings.invoke(block)
        }

        /**
         * construct an [aws.sdk.kotlin.services.transcribe.model.Settings] inside the given [block]
         */
        public fun settings(block: aws.sdk.kotlin.services.transcribe.model.Settings.Builder.() -> kotlin.Unit) {
            this.settings = aws.sdk.kotlin.services.transcribe.model.Settings.invoke(block)
        }

        /**
         * construct an [aws.sdk.kotlin.services.transcribe.model.Subtitles] inside the given [block]
         */
        public fun subtitles(block: aws.sdk.kotlin.services.transcribe.model.Subtitles.Builder.() -> kotlin.Unit) {
            this.subtitles = aws.sdk.kotlin.services.transcribe.model.Subtitles.invoke(block)
        }
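        // Illustrative sketch (not part of the generated model): requesting both supported subtitle
        // formats through the convenience function above. The enum entry spellings (SubtitleFormat.Vtt,
        // SubtitleFormat.Srt) assume smithy-kotlin's usual naming.
        //
        //     subtitles {
        //         formats = listOf(SubtitleFormat.Vtt, SubtitleFormat.Srt)
        //     }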

        internal fun correctErrors(): Builder {
            return this
        }
    }
}
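// Illustrative sketch (not part of the generated file): sending the request with the service client
// from the same SDK. TranscribeClient.fromEnvironment and the startTranscriptionJob DSL overload are
// assumed from the AWS SDK for Kotlin; the bucket and media URI are hypothetical placeholders.
//
//     import aws.sdk.kotlin.services.transcribe.TranscribeClient
//     import aws.sdk.kotlin.services.transcribe.startTranscriptionJob
//
//     suspend fun startJob() {
//         TranscribeClient.fromEnvironment().use { client ->
//             val response = client.startTranscriptionJob {
//                 transcriptionJobName = "my-first-transcription"
//                 identifyLanguage = true
//                 media { mediaFileUri = "s3://DOC-EXAMPLE-BUCKET/my-media.flac" }
//             }
//             println(response.transcriptionJob?.transcriptionJobStatus)
//         }
//     }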



