
commonMain.com.xebia.functional.openai.generated.model.CreateChatCompletionStreamResponse.kt
/**
 *
 * Please note:
 * This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
 * Do not edit this file manually.
 *
 */

@file:Suppress(
    "ArrayInDataClass",
    "EnumEntryName",
    "RemoveRedundantQualifierName",
    "UnusedImport"
)

package com.xebia.functional.openai.generated.model

import com.xebia.functional.openai.generated.model.CreateChatCompletionStreamResponseChoicesInner
import com.xebia.functional.openai.generated.model.CreateChatCompletionStreamResponseUsage



import kotlinx.serialization.Serializable
import kotlinx.serialization.SerialName
import kotlinx.serialization.Contextual
import kotlin.js.JsName
import kotlinx.serialization.json.*

/**
 * Represents a streamed chunk of a chat completion response returned by the model, based on the provided input.
 *
 * @param id A unique identifier for the chat completion. Each chunk has the same ID.
 * @param choices A list of chat completion choices. Can contain more than one element if `n` is greater than 1. Can also be empty for the last chunk if you set `stream_options: {\"include_usage\": true}`.
 * @param created The Unix timestamp (in seconds) of when the chat completion was created. Each chunk has the same timestamp.
 * @param model The model used to generate the completion.
 * @param `object` The object type, which is always `chat.completion.chunk`.
 * @param serviceTier The service tier used for processing the request. This field is only included if the `service_tier` parameter is specified in the request.
 * @param systemFingerprint This fingerprint represents the backend configuration that the model runs with. Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism.
 * @param usage
 */
@Serializable
data class CreateChatCompletionStreamResponse(
    /* A unique identifier for the chat completion. Each chunk has the same ID. */
    @SerialName(value = "id") val id: kotlin.String,
    /* A list of chat completion choices. Can contain more than one element if `n` is greater than 1. Can also be empty for the last chunk if you set `stream_options: {\"include_usage\": true}`. */
    @SerialName(value = "choices") val choices: kotlin.collections.List<CreateChatCompletionStreamResponseChoicesInner>,
    /* The Unix timestamp (in seconds) of when the chat completion was created. Each chunk has the same timestamp. */
    @SerialName(value = "created") val created: kotlin.Int,
    /* The model used to generate the completion. */
    @SerialName(value = "model") val model: kotlin.String,
    /* The object type, which is always `chat.completion.chunk`. */
    @SerialName(value = "object") val `object`: CreateChatCompletionStreamResponse.`Object`,
    /* The service tier used for processing the request. This field is only included if the `service_tier` parameter is specified in the request. */
    @SerialName(value = "service_tier") val serviceTier: CreateChatCompletionStreamResponse.ServiceTier? = null,
    /* This fingerprint represents the backend configuration that the model runs with. Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. */
    @SerialName(value = "system_fingerprint") val systemFingerprint: kotlin.String? = null,
    @SerialName(value = "usage") val usage: CreateChatCompletionStreamResponseUsage? = null
) {

    /**
     * The object type, which is always `chat.completion.chunk`.
     *
     * Values: chat_completion_chunk
     */
    @Serializable
    enum class `Object`(val value: kotlin.String) {
        @SerialName(value = "chat.completion.chunk") chat_completion_chunk("chat.completion.chunk");
    }

    /**
     * The service tier used for processing the request. This field is only included if the `service_tier` parameter is specified in the request.
     *
     * Values: scale,default
     */
    @Serializable
    enum class ServiceTier(val value: kotlin.String) {
        @SerialName(value = "scale") scale("scale"),
        @SerialName(value = "default") default("default");
    }
}
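
A minimal usage sketch of decoding one streamed chunk into this model with kotlinx.serialization. The Json configuration, the parseChunk helper, and the sample payload below are illustrative assumptions, not part of the generated file; only the field names follow the @SerialName values above.

// Illustrative sketch only; the Json configuration and sample payload are assumed, not from the generated code.
import kotlinx.serialization.json.Json
import com.xebia.functional.openai.generated.model.CreateChatCompletionStreamResponse

private val json = Json { ignoreUnknownKeys = true }

// Hypothetical helper: decodes the JSON body of a single streamed chunk.
fun parseChunk(payload: String): CreateChatCompletionStreamResponse =
    json.decodeFromString(CreateChatCompletionStreamResponse.serializer(), payload)

fun main() {
    // Sample payload (assumed values); an empty `choices` list is valid for the final chunk
    // when `stream_options: {"include_usage": true}` is set.
    val chunk = parseChunk(
        """{"id":"chatcmpl-123","object":"chat.completion.chunk","created":1719000000,"model":"gpt-4o-mini","choices":[]}"""
    )
    println(chunk.`object`.value) // chat.completion.chunk
}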



