All downloads are free. The search and download functionality uses the official Maven repository.

commonMain.aws.sdk.kotlin.services.lexruntimev2.model.RecognizeUtteranceResponse.kt Maven / Gradle / Ivy

// Code generated by smithy-kotlin-codegen. DO NOT EDIT!

package aws.sdk.kotlin.services.lexruntimev2.model

import aws.smithy.kotlin.runtime.SdkDsl
import aws.smithy.kotlin.runtime.content.ByteStream

/**
 * Response model for the `RecognizeUtterance` operation.
 *
 * Note that several members ([inputTranscript], [interpretations], [messages],
 * [requestAttributes], and [sessionState]) are delivered gzip-compressed and
 * base64-encoded; callers must decode and decompress them before use.
 */
public class RecognizeUtteranceResponse private constructor(builder: Builder) {
    /**
     * The prompt or statement to send to the user, based on the bot configuration and context.
     * For example, if Amazon Lex V2 did not understand the user intent it sends the
     * `clarificationPrompt` configured for the bot; if the intent requires confirmation it
     * sends the `confirmationPrompt`; if a Lambda function fulfilled the intent and returned
     * a message, that message is sent here.
     */
    public val audioStream: ByteStream? = builder.audioStream

    /** Content type as specified in the `responseContentType` in the request. */
    public val contentType: String? = builder.contentType

    /** Indicates whether the input mode to the operation was text, speech, or from a touch-tone keypad. */
    public val inputMode: String? = builder.inputMode

    /**
     * The text used to process the request. For an audio stream input this is the text
     * extracted from the audio — the text actually processed to recognize intents and
     * slot values. Gzip-compressed and base64-encoded; decode and decompress before use.
     */
    public val inputTranscript: String? = builder.inputTranscript

    /**
     * A list of intents that Amazon Lex V2 determined might satisfy the user's utterance.
     * Each interpretation includes the intent, a confidence score, and an optional sentiment
     * response. Gzip-compressed and base64-encoded; decode and decompress before use.
     */
    public val interpretations: String? = builder.interpretations

    /**
     * A list of messages last sent to the user, ordered as returned by your Lambda function
     * or as defined in the bot. Gzip-compressed and base64-encoded; decode and decompress
     * before use.
     */
    public val messages: String? = builder.messages

    /** The bot member that recognized the utterance. */
    public val recognizedBotMember: String? = builder.recognizedBotMember

    /**
     * The attributes sent in the request. Gzip-compressed and base64-encoded; decode and
     * decompress before use.
     */
    public val requestAttributes: String? = builder.requestAttributes

    /** The identifier of the session in use. */
    public val sessionId: String? = builder.sessionId

    /**
     * The current state of the dialog between the user and the bot; use it to determine the
     * progress of the conversation and the next action. Gzip-compressed and base64-encoded;
     * decode and decompress before use.
     */
    public val sessionState: String? = builder.sessionState

    // Renders every member as "name=value" joined by commas, matching the generated format
    // "RecognizeUtteranceResponse(audioStream=...,...,sessionState=...)".
    override fun toString(): String = listOf(
        "audioStream=$audioStream",
        "contentType=$contentType",
        "inputMode=$inputMode",
        "inputTranscript=$inputTranscript",
        "interpretations=$interpretations",
        "messages=$messages",
        "recognizedBotMember=$recognizedBotMember",
        "requestAttributes=$requestAttributes",
        "sessionId=$sessionId",
        "sessionState=$sessionState",
    ).joinToString(separator = ",", prefix = "RecognizeUtteranceResponse(", postfix = ")")

    // Standard 31-based rolling hash over all members, in declaration order; null members
    // contribute 0. Folding from 0 yields the same value as the unrolled accumulator form.
    override fun hashCode(): Int = listOf(
        audioStream,
        contentType,
        inputMode,
        inputTranscript,
        interpretations,
        messages,
        recognizedBotMember,
        requestAttributes,
        sessionId,
        sessionState,
    ).fold(0) { acc, member -> 31 * acc + (member?.hashCode() ?: 0) }

    // Structural equality over every member. The class is final with a private constructor,
    // so a safe cast is equivalent to the exact-class check used by the generator.
    override fun equals(other: Any?): Boolean {
        if (this === other) return true
        val that = other as? RecognizeUtteranceResponse ?: return false

        return audioStream == that.audioStream &&
            contentType == that.contentType &&
            inputMode == that.inputMode &&
            inputTranscript == that.inputTranscript &&
            interpretations == that.interpretations &&
            messages == that.messages &&
            recognizedBotMember == that.recognizedBotMember &&
            requestAttributes == that.requestAttributes &&
            sessionId == that.sessionId &&
            sessionState == that.sessionState
    }

    /** Returns a copy of this response, optionally mutated through the builder [block]. */
    public inline fun copy(block: Builder.() -> Unit = {}): RecognizeUtteranceResponse =
        Builder(this).apply(block).build()

    /** Mutable builder for [RecognizeUtteranceResponse]; all members default to `null`. */
    @SdkDsl
    public class Builder {
        /** See [RecognizeUtteranceResponse.audioStream]. */
        public var audioStream: ByteStream? = null

        /** See [RecognizeUtteranceResponse.contentType]. */
        public var contentType: String? = null

        /** See [RecognizeUtteranceResponse.inputMode]. */
        public var inputMode: String? = null

        /** See [RecognizeUtteranceResponse.inputTranscript]. */
        public var inputTranscript: String? = null

        /** See [RecognizeUtteranceResponse.interpretations]. */
        public var interpretations: String? = null

        /** See [RecognizeUtteranceResponse.messages]. */
        public var messages: String? = null

        /** See [RecognizeUtteranceResponse.recognizedBotMember]. */
        public var recognizedBotMember: String? = null

        /** See [RecognizeUtteranceResponse.requestAttributes]. */
        public var requestAttributes: String? = null

        /** See [RecognizeUtteranceResponse.sessionId]. */
        public var sessionId: String? = null

        /** See [RecognizeUtteranceResponse.sessionState]. */
        public var sessionState: String? = null

        @PublishedApi
        internal constructor()

        /** Seeds the builder from an existing response, member by member. */
        @PublishedApi
        internal constructor(src: RecognizeUtteranceResponse) : this() {
            audioStream = src.audioStream
            contentType = src.contentType
            inputMode = src.inputMode
            inputTranscript = src.inputTranscript
            interpretations = src.interpretations
            messages = src.messages
            recognizedBotMember = src.recognizedBotMember
            requestAttributes = src.requestAttributes
            sessionId = src.sessionId
            sessionState = src.sessionState
        }

        @PublishedApi
        internal fun build(): RecognizeUtteranceResponse = RecognizeUtteranceResponse(this)

        /** No members require correction for this shape; returns this builder unchanged. */
        internal fun correctErrors(): Builder = this
    }

    public companion object {
        /** DSL-style factory: `RecognizeUtteranceResponse { sessionId = "..." }`. */
        public operator fun invoke(block: Builder.() -> Unit): RecognizeUtteranceResponse =
            Builder().apply(block).build()
    }
}




© 2015 - 2025 Weber Informatics LLC | Privacy Policy