commonMain.aws.sdk.kotlin.services.bedrockagent.model.PromptModelInferenceConfiguration.kt
The AWS SDK for Kotlin client for Bedrock Agent
// Code generated by smithy-kotlin-codegen. DO NOT EDIT!
package aws.sdk.kotlin.services.bedrockagent.model
import aws.smithy.kotlin.runtime.SdkDsl
/**
 * Contains inference configurations related to model inference for a prompt. For more information, see [Inference parameters](https://docs.aws.amazon.com/bedrock/latest/userguide/inference-parameters.html).
 */
public class PromptModelInferenceConfiguration private constructor(builder: Builder) {
    /**
     * The maximum number of tokens to return in the response.
     */
    public val maxTokens: kotlin.Int? = builder.maxTokens

    /**
     * A list of strings that define sequences after which the model will stop generating.
     */
    public val stopSequences: List<String>? = builder.stopSequences

    /**
     * Controls the randomness of the response. Choose a lower value for more predictable outputs and a higher value for more surprising outputs.
     */
    public val temperature: kotlin.Float? = builder.temperature

    /**
     * The number of most-likely candidates that the model considers for the next token during generation.
     */
    public val topK: kotlin.Int? = builder.topK

    /**
     * The percentage of most-likely candidates that the model considers for the next token.
     */
    public val topP: kotlin.Float? = builder.topP

    public companion object {
        public operator fun invoke(block: Builder.() -> kotlin.Unit): aws.sdk.kotlin.services.bedrockagent.model.PromptModelInferenceConfiguration = Builder().apply(block).build()
    }
    override fun toString(): kotlin.String = buildString {
        append("PromptModelInferenceConfiguration(")
        append("maxTokens=$maxTokens,")
        append("stopSequences=$stopSequences,")
        append("temperature=$temperature,")
        append("topK=$topK,")
        append("topP=$topP")
        append(")")
    }

    override fun hashCode(): kotlin.Int {
        var result = maxTokens ?: 0
        result = 31 * result + (stopSequences?.hashCode() ?: 0)
        result = 31 * result + (temperature?.hashCode() ?: 0)
        result = 31 * result + (topK ?: 0)
        result = 31 * result + (topP?.hashCode() ?: 0)
        return result
    }

    override fun equals(other: kotlin.Any?): kotlin.Boolean {
        if (this === other) return true
        if (other == null || this::class != other::class) return false

        other as PromptModelInferenceConfiguration

        if (maxTokens != other.maxTokens) return false
        if (stopSequences != other.stopSequences) return false
        if (!(temperature?.equals(other.temperature) ?: (other.temperature == null))) return false
        if (topK != other.topK) return false
        if (!(topP?.equals(other.topP) ?: (other.topP == null))) return false

        return true
    }
    public inline fun copy(block: Builder.() -> kotlin.Unit = {}): aws.sdk.kotlin.services.bedrockagent.model.PromptModelInferenceConfiguration = Builder(this).apply(block).build()

    @SdkDsl
    public class Builder {
        /**
         * The maximum number of tokens to return in the response.
         */
        public var maxTokens: kotlin.Int? = null

        /**
         * A list of strings that define sequences after which the model will stop generating.
         */
        public var stopSequences: List<String>? = null

        /**
         * Controls the randomness of the response. Choose a lower value for more predictable outputs and a higher value for more surprising outputs.
         */
        public var temperature: kotlin.Float? = null

        /**
         * The number of most-likely candidates that the model considers for the next token during generation.
         */
        public var topK: kotlin.Int? = null

        /**
         * The percentage of most-likely candidates that the model considers for the next token.
         */
        public var topP: kotlin.Float? = null

        @PublishedApi
        internal constructor()

        @PublishedApi
        internal constructor(x: aws.sdk.kotlin.services.bedrockagent.model.PromptModelInferenceConfiguration) : this() {
            this.maxTokens = x.maxTokens
            this.stopSequences = x.stopSequences
            this.temperature = x.temperature
            this.topK = x.topK
            this.topP = x.topP
        }

        @PublishedApi
        internal fun build(): aws.sdk.kotlin.services.bedrockagent.model.PromptModelInferenceConfiguration = PromptModelInferenceConfiguration(this)

        internal fun correctErrors(): Builder {
            return this
        }
    }
}
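
A minimal usage sketch of this type, based only on the members defined above (the companion `invoke` operator, the DSL `Builder`, and `copy`); the parameter values are arbitrary examples, not recommendations:

import aws.sdk.kotlin.services.bedrockagent.model.PromptModelInferenceConfiguration

fun example() {
    // Construct an immutable configuration via the DSL-style companion invoke operator.
    val inferenceConfig = PromptModelInferenceConfiguration {
        maxTokens = 512                       // cap on tokens returned in the response
        temperature = 0.2f                    // lower value -> more predictable output
        topP = 0.9f                           // nucleus-sampling percentage
        stopSequences = listOf("\n\nHuman:")  // generation stops after this sequence
    }

    // copy {} builds a new instance with selected fields overridden; the original is unchanged.
    val hotterConfig = inferenceConfig.copy {
        temperature = 0.9f
    }

    println(hotterConfig)
}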