commonMain.com.xebia.functional.openai.generated.model.TruncationObject.kt
/**
*
* Please note:
* This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
* Do not edit this file manually.
*
*/
@file:Suppress(
    "ArrayInDataClass",
    "EnumEntryName",
    "RemoveRedundantQualifierName",
    "UnusedImport"
)

package com.xebia.functional.openai.generated.model

import kotlinx.serialization.Serializable
import kotlinx.serialization.SerialName
import kotlinx.serialization.Contextual
import kotlin.js.JsName
import kotlinx.serialization.json.*
/**
 * Controls for how a thread will be truncated prior to the run. Use this to control the initial context window of the run.
 *
 * @param type The truncation strategy to use for the thread. The default is `auto`. If set to `last_messages`, the thread will be truncated to the n most recent messages in the thread. When set to `auto`, messages in the middle of the thread will be dropped to fit the context length of the model, `max_prompt_tokens`.
 * @param lastMessages The number of most recent messages from the thread when constructing the context for the run.
 */
@Serializable
data class TruncationObject(
    /* The truncation strategy to use for the thread. The default is `auto`. If set to `last_messages`, the thread will be truncated to the n most recent messages in the thread. When set to `auto`, messages in the middle of the thread will be dropped to fit the context length of the model, `max_prompt_tokens`. */
    @SerialName(value = "type") val type: TruncationObject.Type,

    /* The number of most recent messages from the thread when constructing the context for the run. */
    @SerialName(value = "last_messages") val lastMessages: kotlin.Int? = null
) {

    /**
     * The truncation strategy to use for the thread. The default is `auto`. If set to `last_messages`, the thread will be truncated to the n most recent messages in the thread. When set to `auto`, messages in the middle of the thread will be dropped to fit the context length of the model, `max_prompt_tokens`.
     *
     * Values: auto, last_messages
     */
    @Serializable
    enum class Type(val value: kotlin.String) {
        @SerialName(value = "auto") auto("auto"),
        @SerialName(value = "last_messages") last_messages("last_messages");
    }
}