commonMain.aws.sdk.kotlin.services.transcribestreaming.model.StartMedicalStreamTranscriptionRequest.kt

// Code generated by smithy-kotlin-codegen. DO NOT EDIT!

package aws.sdk.kotlin.services.transcribestreaming.model

import aws.smithy.kotlin.runtime.SdkDsl
import kotlinx.coroutines.flow.Flow

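/**
 * Input for the `StartMedicalStreamTranscription` operation. Instances are
 * immutable; construct them with the DSL-style companion `invoke` (see the
 * usage sketch after the companion object below) and derive modified copies
 * with [copy].
 */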
public class StartMedicalStreamTranscriptionRequest private constructor(builder: Builder) {
    /**
     * An encoded stream of audio blobs. Audio streams are encoded as either HTTP/2 or WebSocket data frames.
     *
     * For more information, see [Transcribing streaming audio](https://docs.aws.amazon.com/transcribe/latest/dg/streaming.html).
     */
    public val audioStream: Flow<aws.sdk.kotlin.services.transcribestreaming.model.AudioStream>? = builder.audioStream
    /**
     * Labels all personal health information (PHI) identified in your transcript.
     *
     * Content identification is performed at the segment level; PHI is flagged upon complete transcription of an audio segment.
     *
     * For more information, see [Identifying personal health information (PHI) in a transcription](https://docs.aws.amazon.com/transcribe/latest/dg/phi-id.html).
     */
    public val contentIdentificationType: aws.sdk.kotlin.services.transcribestreaming.model.MedicalContentIdentificationType? = builder.contentIdentificationType
    /**
     * Enables channel identification in multi-channel audio.
     *
     * Channel identification transcribes the audio on each channel independently, then appends the output for each channel into one transcript.
     *
     * If you have multi-channel audio and do not enable channel identification, your audio is transcribed in a continuous manner and your transcript is not separated by channel.
     *
     * If you include `EnableChannelIdentification` in your request, you must also include `NumberOfChannels`.
     *
     * For more information, see [Transcribing multi-channel audio](https://docs.aws.amazon.com/transcribe/latest/dg/channel-id.html).
     */
    public val enableChannelIdentification: kotlin.Boolean? = builder.enableChannelIdentification
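    // Illustrative note: channel identification and the channel count must be set
    // together, e.g. `enableChannelIdentification = true` with `numberOfChannels = 2`.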
    /**
     * Specify the language code that represents the language spoken in your audio.
     *
     * Amazon Transcribe Medical only supports US English (`en-US`).
     */
    public val languageCode: aws.sdk.kotlin.services.transcribestreaming.model.LanguageCode? = builder.languageCode
    /**
     * Specify the encoding used for the input audio. Supported formats are:
     * + FLAC
     * + OPUS-encoded audio in an Ogg container
     * + PCM (only signed 16-bit little-endian audio formats, which does not include WAV)
     *
     * For more information, see [Media formats](https://docs.aws.amazon.com/transcribe/latest/dg/how-input.html#how-input-audio).
     */
    public val mediaEncoding: aws.sdk.kotlin.services.transcribestreaming.model.MediaEncoding? = builder.mediaEncoding
    /**
     * The sample rate of the input audio (in hertz). Amazon Transcribe Medical supports a range from 16,000 Hz to 48,000 Hz. Note that the sample rate you specify must match that of your audio.
     */
    public val mediaSampleRateHertz: kotlin.Int? = builder.mediaSampleRateHertz
    /**
     * Specify the number of channels in your audio stream. This value must be `2`, as only two channels are supported. If your audio doesn't contain multiple channels, do not include this parameter in your request.
     *
     * If you include `NumberOfChannels` in your request, you must also include `EnableChannelIdentification`.
     */
    public val numberOfChannels: kotlin.Int? = builder.numberOfChannels
    /**
     * Specify a name for your transcription session. If you don't include this parameter in your request, Amazon Transcribe Medical generates an ID and returns it in the response.
     */
    public val sessionId: kotlin.String? = builder.sessionId
    /**
     * Enables speaker partitioning (diarization) in your transcription output. Speaker partitioning labels the speech from individual speakers in your media file.
     *
     * For more information, see [Partitioning speakers (diarization)](https://docs.aws.amazon.com/transcribe/latest/dg/diarization.html).
     */
    public val showSpeakerLabel: kotlin.Boolean? = builder.showSpeakerLabel
    /**
     * Specify the medical specialty contained in your audio.
     */
    public val specialty: aws.sdk.kotlin.services.transcribestreaming.model.Specialty? = builder.specialty
    /**
     * Specify the type of input audio. For example, choose `DICTATION` for a provider dictating patient notes and `CONVERSATION` for a dialogue between a patient and a medical professional.
     */
    public val type: aws.sdk.kotlin.services.transcribestreaming.model.Type? = builder.type
    /**
     * Specify the name of the custom vocabulary that you want to use when processing your transcription. Note that vocabulary names are case sensitive.
     */
    public val vocabularyName: kotlin.String? = builder.vocabularyName

    public companion object {
        public operator fun invoke(block: Builder.() -> kotlin.Unit): aws.sdk.kotlin.services.transcribestreaming.model.StartMedicalStreamTranscriptionRequest = Builder().apply(block).build()
    }
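
    /*
     * Usage sketch (illustrative, not generated code): the companion `invoke`
     * operator above enables DSL-style construction. The enum-style member
     * names below (e.g. `LanguageCode.EnUs`, `MediaEncoding.Pcm`) follow the
     * SDK's usual codegen convention, and `audioFrames` is assumed to be a
     * caller-supplied `Flow<AudioStream>`.
     *
     *     val request = StartMedicalStreamTranscriptionRequest {
     *         languageCode = LanguageCode.EnUs   // the only supported language
     *         mediaEncoding = MediaEncoding.Pcm  // signed 16-bit little-endian
     *         mediaSampleRateHertz = 16_000      // must match the actual audio
     *         specialty = Specialty.Primarycare
     *         type = Type.Conversation
     *         audioStream = audioFrames          // Flow<AudioStream> of audio chunks
     *     }
     */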

    override fun toString(): kotlin.String = buildString {
        append("StartMedicalStreamTranscriptionRequest(")
        append("audioStream=$audioStream,")
        append("contentIdentificationType=$contentIdentificationType,")
        append("enableChannelIdentification=$enableChannelIdentification,")
        append("languageCode=$languageCode,")
        append("mediaEncoding=$mediaEncoding,")
        append("mediaSampleRateHertz=$mediaSampleRateHertz,")
        append("numberOfChannels=$numberOfChannels,")
        append("sessionId=$sessionId,")
        append("showSpeakerLabel=$showSpeakerLabel,")
        append("specialty=$specialty,")
        append("type=$type,")
        append("vocabularyName=$vocabularyName")
        append(")")
    }

    override fun hashCode(): kotlin.Int {
        var result = audioStream?.hashCode() ?: 0
        result = 31 * result + (contentIdentificationType?.hashCode() ?: 0)
        result = 31 * result + (enableChannelIdentification?.hashCode() ?: 0)
        result = 31 * result + (languageCode?.hashCode() ?: 0)
        result = 31 * result + (mediaEncoding?.hashCode() ?: 0)
        result = 31 * result + (mediaSampleRateHertz ?: 0)
        result = 31 * result + (numberOfChannels ?: 0)
        result = 31 * result + (sessionId?.hashCode() ?: 0)
        result = 31 * result + (showSpeakerLabel?.hashCode() ?: 0)
        result = 31 * result + (specialty?.hashCode() ?: 0)
        result = 31 * result + (type?.hashCode() ?: 0)
        result = 31 * result + (vocabularyName?.hashCode() ?: 0)
        return result
    }

    override fun equals(other: kotlin.Any?): kotlin.Boolean {
        if (this === other) return true
        if (other == null || this::class != other::class) return false

        other as StartMedicalStreamTranscriptionRequest

        if (audioStream != other.audioStream) return false
        if (contentIdentificationType != other.contentIdentificationType) return false
        if (enableChannelIdentification != other.enableChannelIdentification) return false
        if (languageCode != other.languageCode) return false
        if (mediaEncoding != other.mediaEncoding) return false
        if (mediaSampleRateHertz != other.mediaSampleRateHertz) return false
        if (numberOfChannels != other.numberOfChannels) return false
        if (sessionId != other.sessionId) return false
        if (showSpeakerLabel != other.showSpeakerLabel) return false
        if (specialty != other.specialty) return false
        if (type != other.type) return false
        if (vocabularyName != other.vocabularyName) return false

        return true
    }

    public inline fun copy(block: Builder.() -> kotlin.Unit = {}): aws.sdk.kotlin.services.transcribestreaming.model.StartMedicalStreamTranscriptionRequest = Builder(this).apply(block).build()
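
    /*
     * Illustrative use of `copy`: rebuild the request with selective overrides,
     * leaving every other field intact, e.g.
     *
     *     val retry = request.copy { sessionId = "session-2" }
     */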

    @SdkDsl
    public class Builder {
        /**
         * An encoded stream of audio blobs. Audio streams are encoded as either HTTP/2 or WebSocket data frames.
         *
         * For more information, see [Transcribing streaming audio](https://docs.aws.amazon.com/transcribe/latest/dg/streaming.html).
         */
        public var audioStream: Flow<aws.sdk.kotlin.services.transcribestreaming.model.AudioStream>? = null
        /**
         * Labels all personal health information (PHI) identified in your transcript.
         *
         * Content identification is performed at the segment level; PHI is flagged upon complete transcription of an audio segment.
         *
         * For more information, see [Identifying personal health information (PHI) in a transcription](https://docs.aws.amazon.com/transcribe/latest/dg/phi-id.html).
         */
        public var contentIdentificationType: aws.sdk.kotlin.services.transcribestreaming.model.MedicalContentIdentificationType? = null
        /**
         * Enables channel identification in multi-channel audio.
         *
         * Channel identification transcribes the audio on each channel independently, then appends the output for each channel into one transcript.
         *
         * If you have multi-channel audio and do not enable channel identification, your audio is transcribed in a continuous manner and your transcript is not separated by channel.
         *
         * If you include `EnableChannelIdentification` in your request, you must also include `NumberOfChannels`.
         *
         * For more information, see [Transcribing multi-channel audio](https://docs.aws.amazon.com/transcribe/latest/dg/channel-id.html).
         */
        public var enableChannelIdentification: kotlin.Boolean? = null
        /**
         * Specify the language code that represents the language spoken in your audio.
         *
         * Amazon Transcribe Medical only supports US English (`en-US`).
         */
        public var languageCode: aws.sdk.kotlin.services.transcribestreaming.model.LanguageCode? = null
        /**
         * Specify the encoding used for the input audio. Supported formats are:
         * + FLAC
         * + OPUS-encoded audio in an Ogg container
         * + PCM (only signed 16-bit little-endian audio formats, which does not include WAV)
         *
         * For more information, see [Media formats](https://docs.aws.amazon.com/transcribe/latest/dg/how-input.html#how-input-audio).
         */
        public var mediaEncoding: aws.sdk.kotlin.services.transcribestreaming.model.MediaEncoding? = null
        /**
         * The sample rate of the input audio (in hertz). Amazon Transcribe Medical supports a range from 16,000 Hz to 48,000 Hz. Note that the sample rate you specify must match that of your audio.
         */
        public var mediaSampleRateHertz: kotlin.Int? = null
        /**
         * Specify the number of channels in your audio stream. This value must be `2`, as only two channels are supported. If your audio doesn't contain multiple channels, do not include this parameter in your request.
         *
         * If you include `NumberOfChannels` in your request, you must also include `EnableChannelIdentification`.
         */
        public var numberOfChannels: kotlin.Int? = null
        /**
         * Specify a name for your transcription session. If you don't include this parameter in your request, Amazon Transcribe Medical generates an ID and returns it in the response.
         */
        public var sessionId: kotlin.String? = null
        /**
         * Enables speaker partitioning (diarization) in your transcription output. Speaker partitioning labels the speech from individual speakers in your media file.
         *
         * For more information, see [Partitioning speakers (diarization)](https://docs.aws.amazon.com/transcribe/latest/dg/diarization.html).
         */
        public var showSpeakerLabel: kotlin.Boolean? = null
        /**
         * Specify the medical specialty contained in your audio.
         */
        public var specialty: aws.sdk.kotlin.services.transcribestreaming.model.Specialty? = null
        /**
         * Specify the type of input audio. For example, choose `DICTATION` for a provider dictating patient notes and `CONVERSATION` for a dialogue between a patient and a medical professional.
         */
        public var type: aws.sdk.kotlin.services.transcribestreaming.model.Type? = null
        /**
         * Specify the name of the custom vocabulary that you want to use when processing your transcription. Note that vocabulary names are case sensitive.
         */
        public var vocabularyName: kotlin.String? = null

        @PublishedApi
        internal constructor()
        @PublishedApi
        internal constructor(x: aws.sdk.kotlin.services.transcribestreaming.model.StartMedicalStreamTranscriptionRequest) : this() {
            this.audioStream = x.audioStream
            this.contentIdentificationType = x.contentIdentificationType
            this.enableChannelIdentification = x.enableChannelIdentification
            this.languageCode = x.languageCode
            this.mediaEncoding = x.mediaEncoding
            this.mediaSampleRateHertz = x.mediaSampleRateHertz
            this.numberOfChannels = x.numberOfChannels
            this.sessionId = x.sessionId
            this.showSpeakerLabel = x.showSpeakerLabel
            this.specialty = x.specialty
            this.type = x.type
            this.vocabularyName = x.vocabularyName
        }

        @PublishedApi
        internal fun build(): aws.sdk.kotlin.services.transcribestreaming.model.StartMedicalStreamTranscriptionRequest = StartMedicalStreamTranscriptionRequest(this)

        internal fun correctErrors(): Builder {
            return this
        }
    }
}
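
/*
 * End-to-end sketch (illustrative, not part of the generated file). It assumes
 * a `TranscribeStreamingClient` from this SDK and a caller-supplied
 * `Flow<AudioStream>`; the response member `transcriptResultStream` follows the
 * SDK's usual event-stream shape but should be checked against your SDK version.
 *
 *     TranscribeStreamingClient { region = "us-east-1" }.use { client ->
 *         client.startMedicalStreamTranscription(request) { response ->
 *             response.transcriptResultStream?.collect { event ->
 *                 println(event) // handle MedicalTranscriptResultStream events
 *             }
 *         }
 *     }
 */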




© 2015 - 2025 Weber Informatics LLC | Privacy Policy