commonMain.aws.sdk.kotlin.services.bedrockruntime.model.ConverseRequest.kt
The AWS Kotlin client for Bedrock Runtime
// Code generated by smithy-kotlin-codegen. DO NOT EDIT!
package aws.sdk.kotlin.services.bedrockruntime.model
import aws.smithy.kotlin.runtime.SdkDsl
import aws.smithy.kotlin.runtime.content.Document
public class ConverseRequest private constructor(builder: Builder) {
/**
* Additional inference parameters that the model supports, beyond the base set of inference parameters that `Converse` and `ConverseStream` support in the `inferenceConfig` field. For more information, see [Model parameters](https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters.html).
*/
public val additionalModelRequestFields: aws.smithy.kotlin.runtime.content.Document? = builder.additionalModelRequestFields
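// Illustrative sketch (not generated code): model-specific parameters can be supplied as a
// smithy-kotlin Document. The "top_k" field below is an Anthropic Claude parameter used purely
// as an example; check your model's documentation for the fields it actually accepts.
//
//     additionalModelRequestFields = Document.Map(mapOf("top_k" to Document.Number(250)))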
/**
* Additional model parameters field paths to return in the response. `Converse` and `ConverseStream` return the requested fields as a JSON Pointer object in the `additionalModelResponseFields` field. The following is example JSON for `additionalModelResponseFieldPaths`.
*
* `[ "/stop_sequence" ]`
*
* For information about the JSON Pointer syntax, see the [Internet Engineering Task Force (IETF)](https://datatracker.ietf.org/doc/html/rfc6901) documentation.
*
* `Converse` and `ConverseStream` reject an empty JSON Pointer or an incorrectly structured JSON Pointer with a `400` error code. If the JSON Pointer is valid but the requested field is not in the model response, it is ignored by `Converse`.
*/
public val additionalModelResponseFieldPaths: List<String>? = builder.additionalModelResponseFieldPaths
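// Illustrative sketch: request the model's native stop-sequence field alongside the normalized
// response, using the JSON Pointer path from the documentation above.
//
//     additionalModelResponseFieldPaths = listOf("/stop_sequence")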
/**
* Configuration information for a guardrail that you want to use in the request. If you include `guardContent` blocks in the `content` field in the `messages` field, the guardrail operates only on those messages. If you include no `guardContent` blocks, the guardrail operates on all messages in the request body and in any included prompt resource.
*/
public val guardrailConfig: aws.sdk.kotlin.services.bedrockruntime.model.GuardrailConfiguration? = builder.guardrailConfig
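// Illustrative sketch: attach a guardrail via the nested builder. The identifier and version
// below are placeholders, not real resources.
//
//     guardrailConfig {
//         guardrailIdentifier = "gr0abcd1234ef"   // hypothetical guardrail ID
//         guardrailVersion = "1"
//     }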
/**
* Inference parameters to pass to the model. `Converse` and `ConverseStream` support a base set of inference parameters. If you need to pass additional parameters that the model supports, use the `additionalModelRequestFields` request field.
*/
public val inferenceConfig: aws.sdk.kotlin.services.bedrockruntime.model.InferenceConfiguration? = builder.inferenceConfig
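// Illustrative sketch: the base inference parameters shared by Converse and ConverseStream
// (values below are placeholders).
//
//     inferenceConfig {
//         maxTokens = 512
//         temperature = 0.5f
//         topP = 0.9f
//         stopSequences = listOf("END_OF_ANSWER")
//     }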
/**
* The messages that you want to send to the model.
*/
public val messages: List<aws.sdk.kotlin.services.bedrockruntime.model.Message>? = builder.messages
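// Illustrative sketch: a single user turn. Message, ConversationRole, and ContentBlock are
// defined in this model package.
//
//     messages = listOf(
//         Message {
//             role = ConversationRole.User
//             content = listOf(ContentBlock.Text("Tell me about Amazon Bedrock."))
//         }
//     )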
/**
* Specifies the model or throughput with which to run inference, or the prompt resource to use in inference. The value depends on the resource that you use:
* + If you use a base model, specify the model ID or its ARN. For a list of model IDs for base models, see [Amazon Bedrock base model IDs (on-demand throughput)](https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html#model-ids-arns) in the Amazon Bedrock User Guide.
* + If you use an inference profile, specify the inference profile ID or its ARN. For a list of inference profile IDs, see [Supported Regions and models for cross-region inference](https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference-support.html) in the Amazon Bedrock User Guide.
* + If you use a provisioned model, specify the ARN of the Provisioned Throughput. For more information, see [Run inference using a Provisioned Throughput](https://docs.aws.amazon.com/bedrock/latest/userguide/prov-thru-use.html) in the Amazon Bedrock User Guide.
* + If you use a custom model, first purchase Provisioned Throughput for it. Then specify the ARN of the resulting provisioned model. For more information, see [Use a custom model in Amazon Bedrock](https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-use.html) in the Amazon Bedrock User Guide.
* + To include a prompt that was defined in [Prompt management](https://docs.aws.amazon.com/bedrock/latest/userguide/prompt-management.html), specify the ARN of the prompt version to use.
*
* The Converse API doesn't support [imported models](https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-import-model.html).
*/
public val modelId: kotlin.String? = builder.modelId
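// Illustrative sketch: a base model is addressed by model ID, while provisioned throughput and
// prompt resources are addressed by ARN (the account ID and resource ID below are placeholders).
//
//     modelId = "anthropic.claude-3-haiku-20240307-v1:0"
//     // or: modelId = "arn:aws:bedrock:us-east-1:111122223333:provisioned-model/abcdef123456"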
/**
* Model performance settings for the request.
*/
public val performanceConfig: aws.sdk.kotlin.services.bedrockruntime.model.PerformanceConfiguration? = builder.performanceConfig
/**
* Contains a map of variables in a prompt from Prompt management to objects containing the values to fill in for them when running model invocation. This field is ignored if you don't specify a prompt resource in the `modelId` field.
*/
public val promptVariables: Map<String, aws.sdk.kotlin.services.bedrockruntime.model.PromptVariableValues>? = builder.promptVariables
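// Illustrative sketch, only meaningful when modelId points at a prompt resource: fill a
// {{genre}} variable defined in the prompt. Assumes the PromptVariableValues.Text union member
// from this package; the variable name and value are placeholders.
//
//     promptVariables = mapOf("genre" to PromptVariableValues.Text("pop"))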
/**
* A prompt that provides instructions or context to the model about the task it should perform, or the persona it should adopt during the conversation.
*/
public val system: List<aws.sdk.kotlin.services.bedrockruntime.model.SystemContentBlock>? = builder.system
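// Illustrative sketch: a plain-text system prompt.
//
//     system = listOf(SystemContentBlock.Text("You are a concise assistant. Answer in one paragraph."))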
/**
* Configuration information for the tools that the model can use when generating a response.
*
* This field is only supported by Anthropic Claude 3, Cohere Command R, Cohere Command R+, and Mistral Large models.
*/
public val toolConfig: aws.sdk.kotlin.services.bedrockruntime.model.ToolConfiguration? = builder.toolConfig
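// Illustrative sketch: expose one tool to the model. Assumes the Tool.ToolSpec and
// ToolInputSchema.Json union members from this package; the tool name and JSON schema are
// placeholders.
//
//     toolConfig {
//         tools = listOf(
//             Tool.ToolSpec(
//                 ToolSpecification {
//                     name = "top_song"
//                     description = "Gets the most popular song for a radio station."
//                     inputSchema = ToolInputSchema.Json(
//                         Document.Map(mapOf(
//                             "type" to Document.String("object"),
//                             "properties" to Document.Map(mapOf(
//                                 "sign" to Document.Map(mapOf("type" to Document.String("string")))
//                             )),
//                             "required" to Document.List(listOf(Document.String("sign")))
//                         ))
//                     )
//                 }
//             )
//         )
//     }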
public companion object {
public operator fun invoke(block: Builder.() -> kotlin.Unit): aws.sdk.kotlin.services.bedrockruntime.model.ConverseRequest = Builder().apply(block).build()
}
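// Illustrative sketch: the companion `invoke` operator backs the DSL form shown here; the
// resulting request is what you pass to BedrockRuntimeClient.converse. The model ID is a real
// Bedrock base model ID; the prompt text is a placeholder.
//
//     val request = ConverseRequest {
//         modelId = "anthropic.claude-3-haiku-20240307-v1:0"
//         messages = listOf(
//             Message {
//                 role = ConversationRole.User
//                 content = listOf(ContentBlock.Text("Write a haiku about Kotlin."))
//             }
//         )
//         inferenceConfig { maxTokens = 256 }
//     }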
override fun toString(): kotlin.String = buildString {
append("ConverseRequest(")
append("additionalModelRequestFields=$additionalModelRequestFields,")
append("additionalModelResponseFieldPaths=$additionalModelResponseFieldPaths,")
append("guardrailConfig=$guardrailConfig,")
append("inferenceConfig=$inferenceConfig,")
append("messages=$messages,")
append("modelId=$modelId,")
append("performanceConfig=$performanceConfig,")
append("promptVariables=*** Sensitive Data Redacted ***,")
append("system=$system,")
append("toolConfig=$toolConfig")
append(")")
}
override fun hashCode(): kotlin.Int {
var result = additionalModelRequestFields?.hashCode() ?: 0
result = 31 * result + (additionalModelResponseFieldPaths?.hashCode() ?: 0)
result = 31 * result + (guardrailConfig?.hashCode() ?: 0)
result = 31 * result + (inferenceConfig?.hashCode() ?: 0)
result = 31 * result + (messages?.hashCode() ?: 0)
result = 31 * result + (modelId?.hashCode() ?: 0)
result = 31 * result + (performanceConfig?.hashCode() ?: 0)
result = 31 * result + (promptVariables?.hashCode() ?: 0)
result = 31 * result + (system?.hashCode() ?: 0)
result = 31 * result + (toolConfig?.hashCode() ?: 0)
return result
}
override fun equals(other: kotlin.Any?): kotlin.Boolean {
if (this === other) return true
if (other == null || this::class != other::class) return false
other as ConverseRequest
if (additionalModelRequestFields != other.additionalModelRequestFields) return false
if (additionalModelResponseFieldPaths != other.additionalModelResponseFieldPaths) return false
if (guardrailConfig != other.guardrailConfig) return false
if (inferenceConfig != other.inferenceConfig) return false
if (messages != other.messages) return false
if (modelId != other.modelId) return false
if (performanceConfig != other.performanceConfig) return false
if (promptVariables != other.promptVariables) return false
if (system != other.system) return false
if (toolConfig != other.toolConfig) return false
return true
}
public inline fun copy(block: Builder.() -> kotlin.Unit = {}): aws.sdk.kotlin.services.bedrockruntime.model.ConverseRequest = Builder(this).apply(block).build()
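// Illustrative sketch: `copy` rebuilds the request with selected fields overridden, e.g. to
// retry the same conversation against a different model.
//
//     val fallback = request.copy { modelId = "anthropic.claude-3-sonnet-20240229-v1:0" }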
@SdkDsl
public class Builder {
/**
* Additional inference parameters that the model supports, beyond the base set of inference parameters that `Converse` and `ConverseStream` support in the `inferenceConfig` field. For more information, see [Model parameters](https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters.html).
*/
public var additionalModelRequestFields: aws.smithy.kotlin.runtime.content.Document? = null
/**
* Additional model parameters field paths to return in the response. `Converse` and `ConverseStream` return the requested fields as a JSON Pointer object in the `additionalModelResponseFields` field. The following is example JSON for `additionalModelResponseFieldPaths`.
*
* `[ "/stop_sequence" ]`
*
* For information about the JSON Pointer syntax, see the [Internet Engineering Task Force (IETF)](https://datatracker.ietf.org/doc/html/rfc6901) documentation.
*
* `Converse` and `ConverseStream` reject an empty JSON Pointer or an incorrectly structured JSON Pointer with a `400` error code. If the JSON Pointer is valid but the requested field is not in the model response, it is ignored by `Converse`.
*/
public var additionalModelResponseFieldPaths: List<String>? = null
/**
* Configuration information for a guardrail that you want to use in the request. If you include `guardContent` blocks in the `content` field in the `messages` field, the guardrail operates only on those messages. If you include no `guardContent` blocks, the guardrail operates on all messages in the request body and in any included prompt resource.
*/
public var guardrailConfig: aws.sdk.kotlin.services.bedrockruntime.model.GuardrailConfiguration? = null
/**
* Inference parameters to pass to the model. `Converse` and `ConverseStream` support a base set of inference parameters. If you need to pass additional parameters that the model supports, use the `additionalModelRequestFields` request field.
*/
public var inferenceConfig: aws.sdk.kotlin.services.bedrockruntime.model.InferenceConfiguration? = null
/**
* The messages that you want to send to the model.
*/
public var messages: List<aws.sdk.kotlin.services.bedrockruntime.model.Message>? = null
/**
* Specifies the model or throughput with which to run inference, or the prompt resource to use in inference. The value depends on the resource that you use:
* + If you use a base model, specify the model ID or its ARN. For a list of model IDs for base models, see [Amazon Bedrock base model IDs (on-demand throughput)](https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html#model-ids-arns) in the Amazon Bedrock User Guide.
* + If you use an inference profile, specify the inference profile ID or its ARN. For a list of inference profile IDs, see [Supported Regions and models for cross-region inference](https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference-support.html) in the Amazon Bedrock User Guide.
* + If you use a provisioned model, specify the ARN of the Provisioned Throughput. For more information, see [Run inference using a Provisioned Throughput](https://docs.aws.amazon.com/bedrock/latest/userguide/prov-thru-use.html) in the Amazon Bedrock User Guide.
* + If you use a custom model, first purchase Provisioned Throughput for it. Then specify the ARN of the resulting provisioned model. For more information, see [Use a custom model in Amazon Bedrock](https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-use.html) in the Amazon Bedrock User Guide.
* + To include a prompt that was defined in [Prompt management](https://docs.aws.amazon.com/bedrock/latest/userguide/prompt-management.html), specify the ARN of the prompt version to use.
*
* The Converse API doesn't support [imported models](https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-import-model.html).
*/
public var modelId: kotlin.String? = null
/**
* Model performance settings for the request.
*/
public var performanceConfig: aws.sdk.kotlin.services.bedrockruntime.model.PerformanceConfiguration? = null
/**
* Contains a map of variables in a prompt from Prompt management to objects containing the values to fill in for them when running model invocation. This field is ignored if you don't specify a prompt resource in the `modelId` field.
*/
public var promptVariables: Map<String, aws.sdk.kotlin.services.bedrockruntime.model.PromptVariableValues>? = null
/**
* A prompt that provides instructions or context to the model about the task it should perform, or the persona it should adopt during the conversation.
*/
public var system: List<aws.sdk.kotlin.services.bedrockruntime.model.SystemContentBlock>? = null
/**
* Configuration information for the tools that the model can use when generating a response.
*
* This field is only supported by Anthropic Claude 3, Cohere Command R, Cohere Command R+, and Mistral Large models.
*/
public var toolConfig: aws.sdk.kotlin.services.bedrockruntime.model.ToolConfiguration? = null
@PublishedApi
internal constructor()
@PublishedApi
internal constructor(x: aws.sdk.kotlin.services.bedrockruntime.model.ConverseRequest) : this() {
this.additionalModelRequestFields = x.additionalModelRequestFields
this.additionalModelResponseFieldPaths = x.additionalModelResponseFieldPaths
this.guardrailConfig = x.guardrailConfig
this.inferenceConfig = x.inferenceConfig
this.messages = x.messages
this.modelId = x.modelId
this.performanceConfig = x.performanceConfig
this.promptVariables = x.promptVariables
this.system = x.system
this.toolConfig = x.toolConfig
}
@PublishedApi
internal fun build(): aws.sdk.kotlin.services.bedrockruntime.model.ConverseRequest = ConverseRequest(this)
/**
* Construct a [aws.sdk.kotlin.services.bedrockruntime.model.GuardrailConfiguration] inside the given [block]
*/
public fun guardrailConfig(block: aws.sdk.kotlin.services.bedrockruntime.model.GuardrailConfiguration.Builder.() -> kotlin.Unit) {
this.guardrailConfig = aws.sdk.kotlin.services.bedrockruntime.model.GuardrailConfiguration.invoke(block)
}
/**
* Construct an [aws.sdk.kotlin.services.bedrockruntime.model.InferenceConfiguration] inside the given [block]
*/
public fun inferenceConfig(block: aws.sdk.kotlin.services.bedrockruntime.model.InferenceConfiguration.Builder.() -> kotlin.Unit) {
this.inferenceConfig = aws.sdk.kotlin.services.bedrockruntime.model.InferenceConfiguration.invoke(block)
}
/**
* Construct a [aws.sdk.kotlin.services.bedrockruntime.model.PerformanceConfiguration] inside the given [block]
*/
public fun performanceConfig(block: aws.sdk.kotlin.services.bedrockruntime.model.PerformanceConfiguration.Builder.() -> kotlin.Unit) {
this.performanceConfig = aws.sdk.kotlin.services.bedrockruntime.model.PerformanceConfiguration.invoke(block)
}
/**
* Construct a [aws.sdk.kotlin.services.bedrockruntime.model.ToolConfiguration] inside the given [block]
*/
public fun toolConfig(block: aws.sdk.kotlin.services.bedrockruntime.model.ToolConfiguration.Builder.() -> kotlin.Unit) {
this.toolConfig = aws.sdk.kotlin.services.bedrockruntime.model.ToolConfiguration.invoke(block)
}
internal fun correctErrors(): Builder {
return this
}
}
}