commonMain.com.xebia.functional.openai.generated.model.CreateThreadAndRunRequest.kt
/**
 *
 * Please note:
 * This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
 * Do not edit this file manually.
 *
 */

@file:Suppress(
    "ArrayInDataClass",
    "EnumEntryName",
    "RemoveRedundantQualifierName",
    "UnusedImport"
)

package com.xebia.functional.openai.generated.model

import com.xebia.functional.openai.generated.model.AssistantsApiResponseFormatOption
import com.xebia.functional.openai.generated.model.AssistantsApiToolChoiceOption
import com.xebia.functional.openai.generated.model.CreateRunRequestModel
import com.xebia.functional.openai.generated.model.CreateThreadAndRunRequestToolResources
import com.xebia.functional.openai.generated.model.CreateThreadAndRunRequestToolsInner
import com.xebia.functional.openai.generated.model.CreateThreadRequest
import com.xebia.functional.openai.generated.model.TruncationObject



import kotlinx.serialization.Serializable
import kotlinx.serialization.SerialName
import kotlinx.serialization.Contextual
import kotlin.js.JsName
import kotlinx.serialization.json.*

/**
 *
 * @param assistantId The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run.
 * @param thread
 * @param model
 * @param instructions Override the default system message of the assistant. This is useful for modifying the behavior on a per-run basis.
 * @param tools Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis.
 * @param toolResources
 * @param metadata Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.
 * @param temperature What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
 * @param topP An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or temperature but not both.
 * @param stream If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message.
 * @param maxPromptTokens The maximum number of prompt tokens that may be used over the course of the run. The run will make a best effort to use only the number of prompt tokens specified, across multiple turns of the run. If the run exceeds the number of prompt tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info.
 * @param maxCompletionTokens The maximum number of completion tokens that may be used over the course of the run. The run will make a best effort to use only the number of completion tokens specified, across multiple turns of the run. If the run exceeds the number of completion tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info.
 * @param truncationStrategy
 * @param toolChoice
 * @param parallelToolCalls Whether to enable [parallel function calling](/docs/guides/function-calling/parallel-function-calling) during tool use.
 * @param responseFormat
 */
@Serializable
data class CreateThreadAndRunRequest(
    /* The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run. */
    @SerialName(value = "assistant_id") val assistantId: kotlin.String,
    @SerialName(value = "thread") val thread: CreateThreadRequest? = null,
    @SerialName(value = "model") val model: CreateRunRequestModel? = null,
    /* Override the default system message of the assistant. This is useful for modifying the behavior on a per-run basis. */
    @SerialName(value = "instructions") val instructions: kotlin.String? = null,
    /* Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. */
    @SerialName(value = "tools") val tools: kotlin.collections.List<CreateThreadAndRunRequestToolsInner>? = null,
    @SerialName(value = "tool_resources") val toolResources: CreateThreadAndRunRequestToolResources? = null,
    /* Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. */
    @SerialName(value = "metadata") val metadata: kotlinx.serialization.json.JsonObject? = null,
    /* What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. */
    @SerialName(value = "temperature") val temperature: kotlin.Double? = 1.0,
    /* An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or temperature but not both. */
    @SerialName(value = "top_p") val topP: kotlin.Double? = 1.0,
    /* If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message. */
    @SerialName(value = "stream") val stream: kotlin.Boolean? = null,
    /* The maximum number of prompt tokens that may be used over the course of the run. The run will make a best effort to use only the number of prompt tokens specified, across multiple turns of the run. If the run exceeds the number of prompt tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info. */
    @SerialName(value = "max_prompt_tokens") val maxPromptTokens: kotlin.Int? = null,
    /* The maximum number of completion tokens that may be used over the course of the run. The run will make a best effort to use only the number of completion tokens specified, across multiple turns of the run. If the run exceeds the number of completion tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info. */
    @SerialName(value = "max_completion_tokens") val maxCompletionTokens: kotlin.Int? = null,
    @SerialName(value = "truncation_strategy") val truncationStrategy: TruncationObject? = null,
    @SerialName(value = "tool_choice") val toolChoice: AssistantsApiToolChoiceOption? = null,
    /* Whether to enable [parallel function calling](/docs/guides/function-calling/parallel-function-calling) during tool use. */
    @SerialName(value = "parallel_tool_calls") val parallelToolCalls: kotlin.Boolean? = true,
    @SerialName(value = "response_format") val responseFormat: AssistantsApiResponseFormatOption? = null
)
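A minimal usage sketch, not part of the generated file: it assumes the class above is on the classpath together with kotlinx.serialization, and round-trips a request through JSON. The assistant ID, instructions, and metadata values are illustrative placeholders.

import com.xebia.functional.openai.generated.model.CreateThreadAndRunRequest
import kotlinx.serialization.json.Json
import kotlinx.serialization.json.buildJsonObject
import kotlinx.serialization.json.put

fun main() {
    // Only assistant_id is required; every other property falls back to its default.
    val request = CreateThreadAndRunRequest(
        assistantId = "asst_abc123",        // placeholder, not a real assistant ID
        instructions = "Answer concisely.", // per-run override of the system message
        temperature = 0.2,
        stream = false,
        metadata = buildJsonObject { put("purpose", "demo") }
    )

    // With the default Json configuration, properties still at their default values
    // (thread, model, tools, and so on) are omitted from the encoded payload.
    val json = Json { prettyPrint = true }
    val encoded = json.encodeToString(CreateThreadAndRunRequest.serializer(), request)
    println(encoded)

    // Decoding restores the omitted properties from the constructor defaults.
    val decoded = json.decodeFromString(CreateThreadAndRunRequest.serializer(), encoded)
    check(decoded == request)
}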



