// Source path: commonMain/com.xebia.functional.openai.generated.model.CreateChatCompletionResponse.kt
// Artifact: xef-openai-client-jvm — "Building applications with LLMs through composability in Kotlin"
/**
*
* Please note:
* This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
* Do not edit this file manually.
*
*/
@file:Suppress(
"ArrayInDataClass",
"EnumEntryName",
"RemoveRedundantQualifierName",
"UnusedImport"
)
package com.xebia.functional.openai.generated.model
import com.xebia.functional.openai.generated.model.CompletionUsage
import com.xebia.functional.openai.generated.model.CreateChatCompletionResponseChoicesInner
import kotlinx.serialization.Serializable
import kotlinx.serialization.SerialName
import kotlinx.serialization.Contextual
import kotlin.js.JsName
import kotlinx.serialization.json.*
/**
 * Represents a chat completion response returned by model, based on the provided input.
 *
 * @param id A unique identifier for the chat completion.
 * @param choices A list of chat completion choices. Can be more than one if `n` is greater than 1.
 * @param created The Unix timestamp (in seconds) of when the chat completion was created.
 * @param model The model used for the chat completion.
 * @param `object` The object type, which is always `chat.completion`.
 * @param serviceTier The service tier used for processing the request. This field is only included if the `service_tier` parameter is specified in the request.
 * @param systemFingerprint This fingerprint represents the backend configuration that the model runs with. Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism.
 * @param usage Token usage statistics for the request; presumably omitted by the API in some responses, hence nullable with a `null` default — confirm against the OpenAI API reference.
 */
@Serializable
data class CreateChatCompletionResponse (
  /* A unique identifier for the chat completion. */
  @SerialName(value = "id") val id: kotlin.String,
  /* A list of chat completion choices. Can be more than one if `n` is greater than 1. */
  // Fix: the generic argument was missing (`List` without a type parameter is not
  // valid Kotlin); the element type matches the generator's import of
  // CreateChatCompletionResponseChoicesInner.
  @SerialName(value = "choices") val choices: kotlin.collections.List<CreateChatCompletionResponseChoicesInner>,
  /* The Unix timestamp (in seconds) of when the chat completion was created. */
  @SerialName(value = "created") val created: kotlin.Int,
  /* The model used for the chat completion. */
  @SerialName(value = "model") val model: kotlin.String,
  /* The object type, which is always `chat.completion`. */
  @SerialName(value = "object") val `object`: CreateChatCompletionResponse.`Object`,
  /* The service tier used for processing the request. This field is only included if the `service_tier` parameter is specified in the request. */
  @SerialName(value = "service_tier") val serviceTier: CreateChatCompletionResponse.ServiceTier? = null,
  /* This fingerprint represents the backend configuration that the model runs with. Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. */
  @SerialName(value = "system_fingerprint") val systemFingerprint: kotlin.String? = null,
  /* Token usage statistics; nullable because the field may be absent from the payload. */
  @SerialName(value = "usage") val usage: CompletionUsage? = null
) {
  /**
   * The object type, which is always `chat.completion`.
   *
   * Values: chat_completion
   */
  @Serializable
  enum class `Object`(val value: kotlin.String) {
    @SerialName(value = "chat.completion") chat_completion("chat.completion");
  }
  /**
   * The service tier used for processing the request. This field is only included if the `service_tier` parameter is specified in the request.
   *
   * Values: scale,default
   */
  @Serializable
  enum class ServiceTier(val value: kotlin.String) {
    @SerialName(value = "scale") scale("scale"),
    @SerialName(value = "default") default("default");
  }
}