io.cequence.openaiscala.task.domain.TextCompletionTaskSettings.scala

package io.cequence.openaiscala.task.domain

case class TextCompletionTaskSettings(
  // ID of the model to use
  model: String,

  // The maximum number of tokens to generate in the completion.
  // The token count of your prompt plus max_tokens cannot exceed the model's context length.
  // Most models have a context length of 2048 tokens (except for the newest models, which support 4096). Defaults to 16.
  max_tokens: Option[Int] = None,

  // What sampling temperature to use, between 0 and 2.
  // Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
  // We generally recommend altering this or top_p but not both. Defaults to 1.
  temperature: Option[Double] = None,

  // An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
  // So 0.1 means only the tokens comprising the top 10% probability mass are considered.
  // We generally recommend altering this or temperature but not both. Defaults to 1.
  top_p: Option[Double] = None,

  // The number of times to run the task/prompt through the completion endpoint.
  repetitions: Int = 100,

  // The number of parallel requests to make to the OpenAI API.
  parallelism: Int = 1
)
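
Below is a minimal usage sketch showing how these settings might be constructed. It assumes the case class above is on the classpath via the openai-scala project; the model id and all values are illustrative only, not defaults prescribed by the library.

import io.cequence.openaiscala.task.domain.TextCompletionTaskSettings

object TextCompletionTaskSettingsExample extends App {
  // Illustrative settings: unspecified Option fields fall back to the API defaults.
  val settings = TextCompletionTaskSettings(
    model = "text-davinci-003",   // hypothetical model id, for illustration
    max_tokens = Some(64),        // cap the completion length
    temperature = Some(0.2),      // favour focused, deterministic output
    repetitions = 10,             // run the prompt 10 times instead of the default 100
    parallelism = 4               // allow up to 4 parallel requests to the OpenAI API
  )

  println(settings)
}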
