commonMain/com/xebia/functional/openai/generated/model/CreateChatCompletionStreamResponse.kt
/**
*
* Please note:
* This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
* Do not edit this file manually.
*
*/
@file:Suppress(
  "ArrayInDataClass",
  "EnumEntryName",
  "RemoveRedundantQualifierName",
  "UnusedImport"
)
package com.xebia.functional.openai.generated.model
import com.xebia.functional.openai.generated.model.CreateChatCompletionStreamResponseChoicesInner
import com.xebia.functional.openai.generated.model.CreateChatCompletionStreamResponseUsage
import kotlinx.serialization.Serializable
import kotlinx.serialization.SerialName
import kotlinx.serialization.Contextual
import kotlin.js.JsName
import kotlinx.serialization.json.*
/**
 * Represents a streamed chunk of a chat completion response returned by the model, based on the provided input.
 *
 * @param id A unique identifier for the chat completion. Each chunk has the same ID.
 * @param choices A list of chat completion choices. Can contain more than one element if `n` is greater than 1. Can also be empty for the last chunk if you set `stream_options: {"include_usage": true}`.
 * @param created The Unix timestamp (in seconds) of when the chat completion was created. Each chunk has the same timestamp.
 * @param model The model used to generate the completion.
 * @param `object` The object type, which is always `chat.completion.chunk`.
 * @param systemFingerprint This fingerprint represents the backend configuration that the model runs with. Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism.
 * @param usage Usage statistics for the request. Only present in the final chunk when `stream_options: {"include_usage": true}` is set.
 */
@Serializable
data class CreateChatCompletionStreamResponse (
  /* A unique identifier for the chat completion. Each chunk has the same ID. */
  @SerialName(value = "id") val id: kotlin.String,
  /* A list of chat completion choices. Can contain more than one element if `n` is greater than 1. Can also be empty for the last chunk if you set `stream_options: {"include_usage": true}`. */
  @SerialName(value = "choices") val choices: kotlin.collections.List<CreateChatCompletionStreamResponseChoicesInner>,
  /* The Unix timestamp (in seconds) of when the chat completion was created. Each chunk has the same timestamp. */
  @SerialName(value = "created") val created: kotlin.Int,
  /* The model used to generate the completion. */
  @SerialName(value = "model") val model: kotlin.String,
  /* The object type, which is always `chat.completion.chunk`. */
  @SerialName(value = "object") val `object`: CreateChatCompletionStreamResponse.`Object`,
  /* This fingerprint represents the backend configuration that the model runs with. Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. */
  @SerialName(value = "system_fingerprint") val systemFingerprint: kotlin.String? = null,
@SerialName(value = "usage") val usage: CreateChatCompletionStreamResponseUsage? = null
) {
  /**
   * The object type, which is always `chat.completion.chunk`.
   *
   * Values: chat_completion_chunk
   */
  @Serializable
  enum class `Object`(val value: kotlin.String) {
    @SerialName(value = "chat.completion.chunk") chat_completion_chunk("chat.completion.chunk");
  }
}
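
/*
 * Usage sketch (hand-written, not emitted by the generator): decodes one
 * streamed chunk into the model above using the kotlinx.serialization imports
 * already present in this file. The payload is an illustrative example of a
 * single `data:` line from the chat completions streaming endpoint; every
 * field value below is made up. Note that in the final chunk of a stream with
 * `stream_options: {"include_usage": true}`, `choices` may be empty and
 * `usage` non-null, so both should be checked before use.
 */
private fun decodeChunkExample(): CreateChatCompletionStreamResponse {
  // Ignore any fields the generated model does not declare, so the sketch
  // stays tolerant of additions to the wire format.
  val json = Json { ignoreUnknownKeys = true }

  // `choices` is left empty here so the example does not depend on the
  // fields of CreateChatCompletionStreamResponseChoicesInner.
  val payload = """
    {
      "id": "chatcmpl-123",
      "choices": [],
      "created": 1700000000,
      "model": "gpt-4o",
      "object": "chat.completion.chunk",
      "system_fingerprint": "fp_abc123"
    }
  """.trimIndent()

  // Uses the non-reified member overload of decodeFromString together with
  // the serializer generated for this @Serializable class, so no additional
  // imports are required.
  return json.decodeFromString(CreateChatCompletionStreamResponse.serializer(), payload)
}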