// commonMain/aws/sdk/kotlin/services/bedrockagent/model/PromptModelInferenceConfiguration.kt
// Code generated by smithy-kotlin-codegen. DO NOT EDIT!

package aws.sdk.kotlin.services.bedrockagent.model

import aws.smithy.kotlin.runtime.SdkDsl

/**
 * Contains inference configurations related to model inference for a prompt. For more information, see [Inference parameters](https://docs.aws.amazon.com/bedrock/latest/userguide/inference-parameters.html).
 */
public class PromptModelInferenceConfiguration private constructor(builder: Builder) {
    /**
     * The maximum number of tokens to return in the response.
     */
    public val maxTokens: kotlin.Int? = builder.maxTokens
    /**
     * A list of strings that define sequences after which the model will stop generating.
     */
    public val stopSequences: List<String>? = builder.stopSequences
    /**
     * Controls the randomness of the response. Choose a lower value for more predictable outputs and a higher value for more surprising outputs.
     */
    public val temperature: kotlin.Float? = builder.temperature
    /**
     * The percentage of most-likely candidates that the model considers for the next token.
     */
    public val topP: kotlin.Float? = builder.topP

    public companion object {
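        /**
         * DSL factory: builds an instance from a [Builder] configuration block.
         */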
        public operator fun invoke(block: Builder.() -> kotlin.Unit): aws.sdk.kotlin.services.bedrockagent.model.PromptModelInferenceConfiguration = Builder().apply(block).build()
    }

    override fun toString(): kotlin.String = buildString {
        append("PromptModelInferenceConfiguration(")
        append("maxTokens=$maxTokens,")
        append("stopSequences=$stopSequences,")
        append("temperature=$temperature,")
        append("topP=$topP")
        append(")")
    }

    override fun hashCode(): kotlin.Int {
        var result = maxTokens ?: 0
        result = 31 * result + (stopSequences?.hashCode() ?: 0)
        result = 31 * result + (temperature?.hashCode() ?: 0)
        result = 31 * result + (topP?.hashCode() ?: 0)
        return result
    }

    override fun equals(other: kotlin.Any?): kotlin.Boolean {
        if (this === other) return true
        if (other == null || this::class != other::class) return false

        other as PromptModelInferenceConfiguration

        if (maxTokens != other.maxTokens) return false
        if (stopSequences != other.stopSequences) return false
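        // Float.equals (rather than ==) gives total-order semantics: NaN compares
        // equal to NaN and 0.0f differs from -0.0f, consistent with hashCode above.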
        if (!(temperature?.equals(other.temperature) ?: (other.temperature == null))) return false
        if (!(topP?.equals(other.topP) ?: (other.topP == null))) return false

        return true
    }

    public inline fun copy(block: Builder.() -> kotlin.Unit = {}): aws.sdk.kotlin.services.bedrockagent.model.PromptModelInferenceConfiguration = Builder(this).apply(block).build()

    @SdkDsl
    public class Builder {
        /**
         * The maximum number of tokens to return in the response.
         */
        public var maxTokens: kotlin.Int? = null
        /**
         * A list of strings that define sequences after which the model will stop generating.
         */
        public var stopSequences: List<String>? = null
        /**
         * Controls the randomness of the response. Choose a lower value for more predictable outputs and a higher value for more surprising outputs.
         */
        public var temperature: kotlin.Float? = null
        /**
         * The percentage of most-likely candidates that the model considers for the next token.
         */
        public var topP: kotlin.Float? = null

        @PublishedApi
        internal constructor()
        @PublishedApi
        internal constructor(x: aws.sdk.kotlin.services.bedrockagent.model.PromptModelInferenceConfiguration) : this() {
            this.maxTokens = x.maxTokens
            this.stopSequences = x.stopSequences
            this.temperature = x.temperature
            this.topP = x.topP
        }

        @PublishedApi
        internal fun build(): aws.sdk.kotlin.services.bedrockagent.model.PromptModelInferenceConfiguration = PromptModelInferenceConfiguration(this)

        // Generated hook for client-side error correction of required members;
        // this shape has no required members, so there is nothing to correct.
        internal fun correctErrors(): Builder {
            return this
        }
    }
}
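
// Usage sketch (not part of the generated file): building and copying a
// configuration via the DSL. The parameter values below are illustrative
// assumptions, not recommended defaults.
private fun promptModelInferenceConfigurationExample() {
    val config = PromptModelInferenceConfiguration {
        maxTokens = 512                       // cap the response length
        temperature = 0.7f                    // moderate randomness
        topP = 0.9f                           // nucleus-sampling cutoff
        stopSequences = listOf("\n\nHuman:")  // stop generating at this marker
    }

    // copy() seeds a Builder from the original and applies the block on top.
    val deterministic = config.copy {
        temperature = 0.0f
    }

    check(deterministic.maxTokens == config.maxTokens) // unchanged fields carry over
    check(deterministic != config)                     // equals() compares all fields
}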



