// commonMain.com.xebia.functional.openai.generated.model.CreateAssistantRequest.kt (xef-openai-client: Building applications with LLMs through composability in Kotlin)
/**
*
* Please note:
* This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
* Do not edit this file manually.
*
*/
@file:Suppress(
"ArrayInDataClass",
"EnumEntryName",
"RemoveRedundantQualifierName",
"UnusedImport"
)
package com.xebia.functional.openai.generated.model
import com.xebia.functional.openai.generated.model.AssistantObjectToolsInner
import com.xebia.functional.openai.generated.model.AssistantsApiResponseFormatOption
import com.xebia.functional.openai.generated.model.CreateAssistantRequestModel
import com.xebia.functional.openai.generated.model.CreateAssistantRequestToolResources
import kotlinx.serialization.Serializable
import kotlinx.serialization.SerialName
import kotlinx.serialization.Contextual
import kotlin.js.JsName
import kotlinx.serialization.json.*
/**
*
*
* @param model
* @param name The name of the assistant. The maximum length is 256 characters.
* @param description The description of the assistant. The maximum length is 512 characters.
* @param instructions The system instructions that the assistant uses. The maximum length is 256,000 characters.
* @param tools A list of tools enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`.
* @param toolResources
* @param metadata Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.
* @param temperature What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
* @param topP An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or temperature but not both.
* @param responseFormat
*/
@Serializable
data class CreateAssistantRequest (
@SerialName(value = "model") val model: CreateAssistantRequestModel,
/* The name of the assistant. The maximum length is 256 characters. */
@SerialName(value = "name") val name: kotlin.String? = null,
/* The description of the assistant. The maximum length is 512 characters. */
@SerialName(value = "description") val description: kotlin.String? = null,
/* The system instructions that the assistant uses. The maximum length is 256,000 characters. */
@SerialName(value = "instructions") val instructions: kotlin.String? = null,
/* A list of tools enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. */
@SerialName(value = "tools") val tools: kotlin.collections.List<AssistantObjectToolsInner>? = arrayListOf(),
@SerialName(value = "tool_resources") val toolResources: CreateAssistantRequestToolResources? = null,
/* Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. */
@SerialName(value = "metadata") val metadata: kotlinx.serialization.json.JsonObject? = null,
/* What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. */
@SerialName(value = "temperature") val temperature: kotlin.Double? = (1).toDouble(),
/* An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or temperature but not both. */
@SerialName(value = "top_p") val topP: kotlin.Double? = (1).toDouble(),
@SerialName(value = "response_format") val responseFormat: AssistantsApiResponseFormatOption? = null
) {
}
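/*
 * Illustrative usage sketch, not part of the generated file. It shows one plausible way to
 * build and serialize a CreateAssistantRequest using only the generated types above and the
 * kotlinx.serialization Json API already imported here. The function name
 * exampleCreateAssistantRequest is hypothetical, and the model value is passed in as a
 * parameter because the concrete shape of CreateAssistantRequestModel depends on the
 * generator output and is not shown in this file.
 */
private fun exampleCreateAssistantRequest(model: CreateAssistantRequestModel): String {
    val request = CreateAssistantRequest(
        model = model,
        name = "Math tutor",                                   // maximum 256 characters
        instructions = "Answer math questions step by step.",  // maximum 256,000 characters
        metadata = buildJsonObject { put("team", "demo") },    // up to 16 key-value pairs
        temperature = 0.2                                      // lower values give more deterministic output
    )
    // Serialize with the generated serializer; snake_case field names come from @SerialName.
    return Json.encodeToString(CreateAssistantRequest.serializer(), request)
}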