commonMain.com.xebia.functional.openai.generated.model.CreateChatCompletionStreamResponseUsage.kt
From xef-openai-client-jvm: Building applications with LLMs through composability in Kotlin
/**
*
* Please note:
* This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
* Do not edit this file manually.
*
*/
@file:Suppress(
"ArrayInDataClass",
"EnumEntryName",
"RemoveRedundantQualifierName",
"UnusedImport"
)
package com.xebia.functional.openai.generated.model

import kotlinx.serialization.Serializable
import kotlinx.serialization.SerialName
import kotlinx.serialization.Contextual
import kotlin.js.JsName
import kotlinx.serialization.json.*

/**
* An optional field that will only be present when you set `stream_options: {"include_usage": true}` in your request. When present, it contains a null value except for the last chunk, which contains the token usage statistics for the entire request.
*
* @param completionTokens Number of tokens in the generated completion.
* @param promptTokens Number of tokens in the prompt.
* @param totalTokens Total number of tokens used in the request (prompt + completion).
*/
@Serializable
data class CreateChatCompletionStreamResponseUsage (
/* Number of tokens in the generated completion. */
@SerialName(value = "completion_tokens") val completionTokens: kotlin.Int,
/* Number of tokens in the prompt. */
@SerialName(value = "prompt_tokens") val promptTokens: kotlin.Int,
/* Total number of tokens used in the request (prompt + completion). */
@SerialName(value = "total_tokens") val totalTokens: kotlin.Int
)
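
A minimal usage sketch, not part of the generated file: it shows how the final streamed chunk's usage payload could be decoded into this class with kotlinx.serialization, assuming the class above is on the classpath (e.g., via the xef-openai-client artifact). The JSON literal and the `main` entry point are illustrative assumptions, not output captured from the OpenAI API.

import kotlinx.serialization.decodeFromString
import kotlinx.serialization.json.Json

fun main() {
    // Lenient decoder: real stream chunks carry additional fields alongside
    // "usage" (id, choices, ...), so unknown keys are ignored when reusing
    // this configuration for larger payloads.
    val json = Json { ignoreUnknownKeys = true }

    // Illustrative payload; per the KDoc above, only the last chunk of a
    // stream carries a non-null usage object like this one.
    val payload = """{"completion_tokens": 12, "prompt_tokens": 9, "total_tokens": 21}"""

    val usage = json.decodeFromString<CreateChatCompletionStreamResponseUsage>(payload)

    // total_tokens is documented as prompt + completion.
    check(usage.totalTokens == usage.promptTokens + usage.completionTokens)
    println(usage)
}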