commonMain.com.xebia.functional.openai.generated.model.CreateChatCompletionRequestModel.kt
/**
 *
 * Please note:
 * This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
 * Do not edit this file manually.
 *
 */

@file:Suppress(
    "ArrayInDataClass",
    "EnumEntryName",
    "RemoveRedundantQualifierName",
    "UnusedImport"
)

package com.xebia.functional.openai.generated.model


import kotlinx.serialization.*
import kotlinx.serialization.builtins.*
import kotlinx.serialization.encoding.*
import kotlin.jvm.JvmStatic
import com.xebia.functional.openai.generated.model.CreateChatCompletionRequestModel.Supported.gpt_4o
import com.xebia.functional.openai.generated.model.CreateChatCompletionRequestModel.Supported.gpt_4o_2024_05_13
import com.xebia.functional.openai.generated.model.CreateChatCompletionRequestModel.Supported.gpt_4o_2024_08_06
import com.xebia.functional.openai.generated.model.CreateChatCompletionRequestModel.Supported.chatgpt_4o_latest
import com.xebia.functional.openai.generated.model.CreateChatCompletionRequestModel.Supported.gpt_4o_mini
import com.xebia.functional.openai.generated.model.CreateChatCompletionRequestModel.Supported.gpt_4o_mini_2024_07_18
import com.xebia.functional.openai.generated.model.CreateChatCompletionRequestModel.Supported.gpt_4_turbo
import com.xebia.functional.openai.generated.model.CreateChatCompletionRequestModel.Supported.gpt_4_turbo_2024_04_09
import com.xebia.functional.openai.generated.model.CreateChatCompletionRequestModel.Supported.gpt_4_0125_preview
import com.xebia.functional.openai.generated.model.CreateChatCompletionRequestModel.Supported.gpt_4_turbo_preview
import com.xebia.functional.openai.generated.model.CreateChatCompletionRequestModel.Supported.gpt_4_1106_preview
import com.xebia.functional.openai.generated.model.CreateChatCompletionRequestModel.Supported.gpt_4_vision_preview
import com.xebia.functional.openai.generated.model.CreateChatCompletionRequestModel.Supported.gpt_4
import com.xebia.functional.openai.generated.model.CreateChatCompletionRequestModel.Supported.gpt_4_0314
import com.xebia.functional.openai.generated.model.CreateChatCompletionRequestModel.Supported.gpt_4_0613
import com.xebia.functional.openai.generated.model.CreateChatCompletionRequestModel.Supported.gpt_4_32k
import com.xebia.functional.openai.generated.model.CreateChatCompletionRequestModel.Supported.gpt_4_32k_0314
import com.xebia.functional.openai.generated.model.CreateChatCompletionRequestModel.Supported.gpt_4_32k_0613
import com.xebia.functional.openai.generated.model.CreateChatCompletionRequestModel.Supported.gpt_3_5_turbo
import com.xebia.functional.openai.generated.model.CreateChatCompletionRequestModel.Supported.gpt_3_5_turbo_16k
import com.xebia.functional.openai.generated.model.CreateChatCompletionRequestModel.Supported.gpt_3_5_turbo_0301
import com.xebia.functional.openai.generated.model.CreateChatCompletionRequestModel.Supported.gpt_3_5_turbo_0613
import com.xebia.functional.openai.generated.model.CreateChatCompletionRequestModel.Supported.gpt_3_5_turbo_1106
import com.xebia.functional.openai.generated.model.CreateChatCompletionRequestModel.Supported.gpt_3_5_turbo_0125
import com.xebia.functional.openai.generated.model.CreateChatCompletionRequestModel.Supported.gpt_3_5_turbo_16k_0613


/**
 * ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API.
 */
// We define a serializer for the parent sum type, and then use it to serialize the child types
@Suppress("SERIALIZER_TYPE_INCOMPATIBLE")
@Serializable(with = CreateChatCompletionRequestModelSerializer::class)
sealed interface CreateChatCompletionRequestModel {
  val value: kotlin.String

  @Serializable(with = CreateChatCompletionRequestModelSerializer::class)
  enum class Supported(override val value: kotlin.String) : CreateChatCompletionRequestModel {
    @SerialName(value = "gpt-4o") gpt_4o("gpt-4o"),@SerialName(value = "gpt-4o-2024-05-13") gpt_4o_2024_05_13("gpt-4o-2024-05-13"),@SerialName(value = "gpt-4o-2024-08-06") gpt_4o_2024_08_06("gpt-4o-2024-08-06"),@SerialName(value = "chatgpt-4o-latest") chatgpt_4o_latest("chatgpt-4o-latest"),@SerialName(value = "gpt-4o-mini") gpt_4o_mini("gpt-4o-mini"),@SerialName(value = "gpt-4o-mini-2024-07-18") gpt_4o_mini_2024_07_18("gpt-4o-mini-2024-07-18"),@SerialName(value = "gpt-4-turbo") gpt_4_turbo("gpt-4-turbo"),@SerialName(value = "gpt-4-turbo-2024-04-09") gpt_4_turbo_2024_04_09("gpt-4-turbo-2024-04-09"),@SerialName(value = "gpt-4-0125-preview") gpt_4_0125_preview("gpt-4-0125-preview"),@SerialName(value = "gpt-4-turbo-preview") gpt_4_turbo_preview("gpt-4-turbo-preview"),@SerialName(value = "gpt-4-1106-preview") gpt_4_1106_preview("gpt-4-1106-preview"),@SerialName(value = "gpt-4-vision-preview") gpt_4_vision_preview("gpt-4-vision-preview"),@SerialName(value = "gpt-4") gpt_4("gpt-4"),@SerialName(value = "gpt-4-0314") gpt_4_0314("gpt-4-0314"),@SerialName(value = "gpt-4-0613") gpt_4_0613("gpt-4-0613"),@SerialName(value = "gpt-4-32k") gpt_4_32k("gpt-4-32k"),@SerialName(value = "gpt-4-32k-0314") gpt_4_32k_0314("gpt-4-32k-0314"),@SerialName(value = "gpt-4-32k-0613") gpt_4_32k_0613("gpt-4-32k-0613"),@SerialName(value = "gpt-3.5-turbo") gpt_3_5_turbo("gpt-3.5-turbo"),@SerialName(value = "gpt-3.5-turbo-16k") gpt_3_5_turbo_16k("gpt-3.5-turbo-16k"),@SerialName(value = "gpt-3.5-turbo-0301") gpt_3_5_turbo_0301("gpt-3.5-turbo-0301"),@SerialName(value = "gpt-3.5-turbo-0613") gpt_3_5_turbo_0613("gpt-3.5-turbo-0613"),@SerialName(value = "gpt-3.5-turbo-1106") gpt_3_5_turbo_1106("gpt-3.5-turbo-1106"),@SerialName(value = "gpt-3.5-turbo-0125") gpt_3_5_turbo_0125("gpt-3.5-turbo-0125"),@SerialName(value = "gpt-3.5-turbo-16k-0613") gpt_3_5_turbo_16k_0613("gpt-3.5-turbo-16k-0613");

    override fun toString(): kotlin.String = value
  }

  @Serializable(with = CreateChatCompletionRequestModelSerializer::class)
  data class Custom(override val value: kotlin.String) : CreateChatCompletionRequestModel

  companion object {
    @JvmStatic
    fun valueOf(value: kotlin.String): CreateChatCompletionRequestModel =
      values().firstOrNull { it.value == value } ?: Custom(value)

    inline val gpt_4o: CreateChatCompletionRequestModel
      get() = Supported.gpt_4o

    inline val gpt_4o_2024_05_13: CreateChatCompletionRequestModel
      get() = Supported.gpt_4o_2024_05_13

    inline val gpt_4o_2024_08_06: CreateChatCompletionRequestModel
      get() = Supported.gpt_4o_2024_08_06

    inline val chatgpt_4o_latest: CreateChatCompletionRequestModel
      get() = Supported.chatgpt_4o_latest

    inline val gpt_4o_mini: CreateChatCompletionRequestModel
      get() = Supported.gpt_4o_mini

    inline val gpt_4o_mini_2024_07_18: CreateChatCompletionRequestModel
      get() = Supported.gpt_4o_mini_2024_07_18

    inline val gpt_4_turbo: CreateChatCompletionRequestModel
      get() = Supported.gpt_4_turbo

    inline val gpt_4_turbo_2024_04_09: CreateChatCompletionRequestModel
      get() = Supported.gpt_4_turbo_2024_04_09

    inline val gpt_4_0125_preview: CreateChatCompletionRequestModel
      get() = Supported.gpt_4_0125_preview

    inline val gpt_4_turbo_preview: CreateChatCompletionRequestModel
      get() = Supported.gpt_4_turbo_preview

    inline val gpt_4_1106_preview: CreateChatCompletionRequestModel
      get() = Supported.gpt_4_1106_preview

    inline val gpt_4_vision_preview: CreateChatCompletionRequestModel
      get() = Supported.gpt_4_vision_preview

    inline val gpt_4: CreateChatCompletionRequestModel
      get() = Supported.gpt_4

    inline val gpt_4_0314: CreateChatCompletionRequestModel
      get() = Supported.gpt_4_0314

    inline val gpt_4_0613: CreateChatCompletionRequestModel
      get() = Supported.gpt_4_0613

    inline val gpt_4_32k: CreateChatCompletionRequestModel
      get() = Supported.gpt_4_32k

    inline val gpt_4_32k_0314: CreateChatCompletionRequestModel
      get() = Supported.gpt_4_32k_0314

    inline val gpt_4_32k_0613: CreateChatCompletionRequestModel
      get() = Supported.gpt_4_32k_0613

    inline val gpt_3_5_turbo: CreateChatCompletionRequestModel
      get() = Supported.gpt_3_5_turbo

    inline val gpt_3_5_turbo_16k: CreateChatCompletionRequestModel
      get() = Supported.gpt_3_5_turbo_16k

    inline val gpt_3_5_turbo_0301: CreateChatCompletionRequestModel
      get() = Supported.gpt_3_5_turbo_0301

    inline val gpt_3_5_turbo_0613: CreateChatCompletionRequestModel
      get() = Supported.gpt_3_5_turbo_0613

    inline val gpt_3_5_turbo_1106: CreateChatCompletionRequestModel
      get() = Supported.gpt_3_5_turbo_1106

    inline val gpt_3_5_turbo_0125: CreateChatCompletionRequestModel
      get() = Supported.gpt_3_5_turbo_0125

    inline val gpt_3_5_turbo_16k_0613: CreateChatCompletionRequestModel
      get() = Supported.gpt_3_5_turbo_16k_0613

    @JvmStatic fun values(): List<CreateChatCompletionRequestModel> = Supported.entries
  }
}
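
// Illustrative usage sketch (not part of the generated file): the companion
// shortcuts resolve to Supported entries, while valueOf falls back to Custom for
// any id that is not in the enum. The id "my-fine-tuned-model" below is a
// hypothetical example.
private fun exampleModelSelection() {
  // Known model via the companion shortcut; resolves to Supported.gpt_4o.
  val known: CreateChatCompletionRequestModel = CreateChatCompletionRequestModel.gpt_4o
  // Unknown model id; valueOf finds no Supported entry and wraps it in Custom.
  val custom = CreateChatCompletionRequestModel.valueOf("my-fine-tuned-model")
  check(known.value == "gpt-4o")
  check(custom == CreateChatCompletionRequestModel.Custom("my-fine-tuned-model"))
}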

object CreateChatCompletionRequestModelSerializer : KSerializer<CreateChatCompletionRequestModel> {
    private val valueSerializer = kotlin.String.serializer()
    override val descriptor = valueSerializer.descriptor

    override fun deserialize(decoder: Decoder): CreateChatCompletionRequestModel {
        val value = decoder.decodeSerializableValue(valueSerializer)
        return CreateChatCompletionRequestModel.valueOf(value)
    }

    override fun serialize(encoder: Encoder, value: CreateChatCompletionRequestModel) {
        encoder.encodeSerializableValue(valueSerializer, value.value)
    }
}
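
// Illustrative round-trip sketch (not part of the generated file): the serializer
// above writes a model as a plain JSON string and reads unknown ids back as Custom.
// It assumes kotlinx-serialization-json is on the classpath; the default Json
// instance and the id "my-fine-tuned-model" are assumptions of this sketch.
private fun exampleJsonRoundTrip() {
  val json = kotlinx.serialization.json.Json
  // A Supported entry is encoded as its raw string value.
  val encoded = json.encodeToString(
    CreateChatCompletionRequestModelSerializer,
    CreateChatCompletionRequestModel.gpt_4o_mini
  )
  check(encoded == "\"gpt-4o-mini\"")
  // An id outside the enum decodes to Custom via valueOf.
  val decoded = json.decodeFromString(CreateChatCompletionRequestModelSerializer, "\"my-fine-tuned-model\"")
  check(decoded == CreateChatCompletionRequestModel.Custom("my-fine-tuned-model"))
}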



