All downloads are free. Search and download functionality uses the official Maven repository.

commonMain.com.xebia.functional.openai.generated.model.CreateRunRequestModel.kt Maven / Gradle / Ivy

There is a newer version: 0.0.5-alpha.119
Show newest version
/**
 *
 * Please note:
 * This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
 * Do not edit this file manually.
 *
 */

@file:Suppress(
    "ArrayInDataClass",
    "EnumEntryName",
    "RemoveRedundantQualifierName",
    "UnusedImport"
)

package com.xebia.functional.openai.generated.model


import kotlinx.serialization.*
import kotlinx.serialization.builtins.*
import kotlinx.serialization.encoding.*
import kotlin.jvm.JvmStatic
import com.xebia.functional.openai.generated.model.CreateRunRequestModel.Supported.gpt_4o
import com.xebia.functional.openai.generated.model.CreateRunRequestModel.Supported.gpt_4o_2024_08_06
import com.xebia.functional.openai.generated.model.CreateRunRequestModel.Supported.gpt_4o_2024_05_13
import com.xebia.functional.openai.generated.model.CreateRunRequestModel.Supported.gpt_4o_mini
import com.xebia.functional.openai.generated.model.CreateRunRequestModel.Supported.gpt_4o_mini_2024_07_18
import com.xebia.functional.openai.generated.model.CreateRunRequestModel.Supported.gpt_4_turbo
import com.xebia.functional.openai.generated.model.CreateRunRequestModel.Supported.gpt_4_turbo_2024_04_09
import com.xebia.functional.openai.generated.model.CreateRunRequestModel.Supported.gpt_4_0125_preview
import com.xebia.functional.openai.generated.model.CreateRunRequestModel.Supported.gpt_4_turbo_preview
import com.xebia.functional.openai.generated.model.CreateRunRequestModel.Supported.gpt_4_1106_preview
import com.xebia.functional.openai.generated.model.CreateRunRequestModel.Supported.gpt_4_vision_preview
import com.xebia.functional.openai.generated.model.CreateRunRequestModel.Supported.gpt_4
import com.xebia.functional.openai.generated.model.CreateRunRequestModel.Supported.gpt_4_0314
import com.xebia.functional.openai.generated.model.CreateRunRequestModel.Supported.gpt_4_0613
import com.xebia.functional.openai.generated.model.CreateRunRequestModel.Supported.gpt_4_32k
import com.xebia.functional.openai.generated.model.CreateRunRequestModel.Supported.gpt_4_32k_0314
import com.xebia.functional.openai.generated.model.CreateRunRequestModel.Supported.gpt_4_32k_0613
import com.xebia.functional.openai.generated.model.CreateRunRequestModel.Supported.gpt_3_5_turbo
import com.xebia.functional.openai.generated.model.CreateRunRequestModel.Supported.gpt_3_5_turbo_16k
import com.xebia.functional.openai.generated.model.CreateRunRequestModel.Supported.gpt_3_5_turbo_0613
import com.xebia.functional.openai.generated.model.CreateRunRequestModel.Supported.gpt_3_5_turbo_1106
import com.xebia.functional.openai.generated.model.CreateRunRequestModel.Supported.gpt_3_5_turbo_0125
import com.xebia.functional.openai.generated.model.CreateRunRequestModel.Supported.gpt_3_5_turbo_16k_0613


/**
 * The ID of the [Model](/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used.
 */
// We define a serializer for the parent sum type, and then use it to serialize the child types
@Suppress("SERIALIZER_TYPE_INCOMPATIBLE")
@Serializable(with = CreateRunRequestModelSerializer::class)
sealed interface CreateRunRequestModel {
  /** The raw model identifier as sent over the wire, e.g. "gpt-4o". */
  val value: kotlin.String

  /** The closed set of model IDs known to this client at code-generation time. */
  @Serializable(with = CreateRunRequestModelSerializer::class)
  enum class Supported(override val value: kotlin.String) : CreateRunRequestModel {
    @SerialName(value = "gpt-4o") gpt_4o("gpt-4o"),
    @SerialName(value = "gpt-4o-2024-08-06") gpt_4o_2024_08_06("gpt-4o-2024-08-06"),
    @SerialName(value = "gpt-4o-2024-05-13") gpt_4o_2024_05_13("gpt-4o-2024-05-13"),
    @SerialName(value = "gpt-4o-mini") gpt_4o_mini("gpt-4o-mini"),
    @SerialName(value = "gpt-4o-mini-2024-07-18") gpt_4o_mini_2024_07_18("gpt-4o-mini-2024-07-18"),
    @SerialName(value = "gpt-4-turbo") gpt_4_turbo("gpt-4-turbo"),
    @SerialName(value = "gpt-4-turbo-2024-04-09") gpt_4_turbo_2024_04_09("gpt-4-turbo-2024-04-09"),
    @SerialName(value = "gpt-4-0125-preview") gpt_4_0125_preview("gpt-4-0125-preview"),
    @SerialName(value = "gpt-4-turbo-preview") gpt_4_turbo_preview("gpt-4-turbo-preview"),
    @SerialName(value = "gpt-4-1106-preview") gpt_4_1106_preview("gpt-4-1106-preview"),
    @SerialName(value = "gpt-4-vision-preview") gpt_4_vision_preview("gpt-4-vision-preview"),
    @SerialName(value = "gpt-4") gpt_4("gpt-4"),
    @SerialName(value = "gpt-4-0314") gpt_4_0314("gpt-4-0314"),
    @SerialName(value = "gpt-4-0613") gpt_4_0613("gpt-4-0613"),
    @SerialName(value = "gpt-4-32k") gpt_4_32k("gpt-4-32k"),
    @SerialName(value = "gpt-4-32k-0314") gpt_4_32k_0314("gpt-4-32k-0314"),
    @SerialName(value = "gpt-4-32k-0613") gpt_4_32k_0613("gpt-4-32k-0613"),
    @SerialName(value = "gpt-3.5-turbo") gpt_3_5_turbo("gpt-3.5-turbo"),
    @SerialName(value = "gpt-3.5-turbo-16k") gpt_3_5_turbo_16k("gpt-3.5-turbo-16k"),
    @SerialName(value = "gpt-3.5-turbo-0613") gpt_3_5_turbo_0613("gpt-3.5-turbo-0613"),
    @SerialName(value = "gpt-3.5-turbo-1106") gpt_3_5_turbo_1106("gpt-3.5-turbo-1106"),
    @SerialName(value = "gpt-3.5-turbo-0125") gpt_3_5_turbo_0125("gpt-3.5-turbo-0125"),
    @SerialName(value = "gpt-3.5-turbo-16k-0613") gpt_3_5_turbo_16k_0613("gpt-3.5-turbo-16k-0613");

    override fun toString(): kotlin.String = value
  }

  /**
   * Escape hatch for model IDs not present in [Supported], so newer server-side
   * models can be used without regenerating this client.
   */
  @Serializable(with = CreateRunRequestModelSerializer::class)
  data class Custom(override val value: kotlin.String) : CreateRunRequestModel

  companion object {
    /**
     * Resolves a raw model ID to the matching [Supported] entry, or wraps it in
     * [Custom] when it is not a known constant. Total: never throws.
     */
    @JvmStatic
    fun valueOf(value: kotlin.String): CreateRunRequestModel =
      values().firstOrNull { it.value == value } ?: Custom(value)

    inline val gpt_4o: CreateRunRequestModel
      get() = Supported.gpt_4o

    inline val gpt_4o_2024_08_06: CreateRunRequestModel
      get() = Supported.gpt_4o_2024_08_06

    inline val gpt_4o_2024_05_13: CreateRunRequestModel
      get() = Supported.gpt_4o_2024_05_13

    inline val gpt_4o_mini: CreateRunRequestModel
      get() = Supported.gpt_4o_mini

    inline val gpt_4o_mini_2024_07_18: CreateRunRequestModel
      get() = Supported.gpt_4o_mini_2024_07_18

    inline val gpt_4_turbo: CreateRunRequestModel
      get() = Supported.gpt_4_turbo

    inline val gpt_4_turbo_2024_04_09: CreateRunRequestModel
      get() = Supported.gpt_4_turbo_2024_04_09

    inline val gpt_4_0125_preview: CreateRunRequestModel
      get() = Supported.gpt_4_0125_preview

    inline val gpt_4_turbo_preview: CreateRunRequestModel
      get() = Supported.gpt_4_turbo_preview

    inline val gpt_4_1106_preview: CreateRunRequestModel
      get() = Supported.gpt_4_1106_preview

    inline val gpt_4_vision_preview: CreateRunRequestModel
      get() = Supported.gpt_4_vision_preview

    inline val gpt_4: CreateRunRequestModel
      get() = Supported.gpt_4

    inline val gpt_4_0314: CreateRunRequestModel
      get() = Supported.gpt_4_0314

    inline val gpt_4_0613: CreateRunRequestModel
      get() = Supported.gpt_4_0613

    inline val gpt_4_32k: CreateRunRequestModel
      get() = Supported.gpt_4_32k

    inline val gpt_4_32k_0314: CreateRunRequestModel
      get() = Supported.gpt_4_32k_0314

    inline val gpt_4_32k_0613: CreateRunRequestModel
      get() = Supported.gpt_4_32k_0613

    inline val gpt_3_5_turbo: CreateRunRequestModel
      get() = Supported.gpt_3_5_turbo

    inline val gpt_3_5_turbo_16k: CreateRunRequestModel
      get() = Supported.gpt_3_5_turbo_16k

    inline val gpt_3_5_turbo_0613: CreateRunRequestModel
      get() = Supported.gpt_3_5_turbo_0613

    inline val gpt_3_5_turbo_1106: CreateRunRequestModel
      get() = Supported.gpt_3_5_turbo_1106

    inline val gpt_3_5_turbo_0125: CreateRunRequestModel
      get() = Supported.gpt_3_5_turbo_0125

    inline val gpt_3_5_turbo_16k_0613: CreateRunRequestModel
      get() = Supported.gpt_3_5_turbo_16k_0613

    /** All [Supported] entries. Generic argument restored: bare `List` does not compile. */
    @JvmStatic fun values(): List<CreateRunRequestModel> = Supported.entries
  }
}

/**
 * Serializes [CreateRunRequestModel] as a plain JSON string: writes [CreateRunRequestModel.value]
 * on encode, and on decode maps the string back through [CreateRunRequestModel.valueOf]
 * (known IDs become [CreateRunRequestModel.Supported], anything else becomes
 * [CreateRunRequestModel.Custom]). The `KSerializer` type argument is restored here:
 * it was stripped (HTML angle-bracket loss) and the overrides do not type-check without it.
 */
object CreateRunRequestModelSerializer : KSerializer<CreateRunRequestModel> {
    // Delegate the actual string encoding/decoding to the built-in String serializer.
    private val valueSerializer = kotlin.String.serializer()
    override val descriptor = valueSerializer.descriptor

    override fun deserialize(decoder: Decoder): CreateRunRequestModel {
        val value = decoder.decodeSerializableValue(valueSerializer)
        return CreateRunRequestModel.valueOf(value)
    }

    override fun serialize(encoder: Encoder, value: CreateRunRequestModel) {
        encoder.encodeSerializableValue(valueSerializer, value.value)
    }
}




© 2015 - 2024 Weber Informatics LLC | Privacy Policy