/*
* OpenAI API
* The OpenAI REST API. Please see https://platform.openai.com/docs/api-reference for more details.
*
* The version of the OpenAPI document: 2.1.0
*
*
* NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
* https://openapi-generator.tech
* Do not edit the class manually.
*/
package com.github.tjake.jlama.net.openai.model;
import java.util.Objects;
import java.util.Map;
import java.util.HashMap;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonTypeName;
import com.fasterxml.jackson.annotation.JsonValue;
import com.github.tjake.jlama.net.openai.model.ChatCompletionFunctions;
import com.github.tjake.jlama.net.openai.model.ChatCompletionRequestMessage;
import com.github.tjake.jlama.net.openai.model.ChatCompletionStreamOptions;
import com.github.tjake.jlama.net.openai.model.ChatCompletionTool;
import com.github.tjake.jlama.net.openai.model.ChatCompletionToolChoiceOption;
import com.github.tjake.jlama.net.openai.model.CreateChatCompletionRequestFunctionCall;
import com.github.tjake.jlama.net.openai.model.CreateChatCompletionRequestResponseFormat;
import com.github.tjake.jlama.net.openai.model.CreateChatCompletionRequestStop;
import java.math.BigDecimal;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import com.fasterxml.jackson.annotation.JsonPropertyOrder;
import jakarta.validation.constraints.*;
import jakarta.validation.Valid;
import org.hibernate.validator.constraints.*;
import com.github.tjake.jlama.net.openai.JSON;
/**
* CreateChatCompletionRequest
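*
* <p>Typical usage follows the fluent builder style of the generated setters. A minimal
* sketch; {@code userMessage} and the model id are illustrative placeholders, not part of
* this class:
* <pre>{@code
* CreateChatCompletionRequest req = new CreateChatCompletionRequest()
*     .model("gpt-4o")                          // any model id your server exposes
*     .temperature(new BigDecimal("0.7"))
*     .maxTokens(256)
*     .addMessagesItem(userMessage);            // a prebuilt ChatCompletionRequestMessage
* }</pre>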
*/
@JsonPropertyOrder({
CreateChatCompletionRequest.JSON_PROPERTY_MESSAGES,
CreateChatCompletionRequest.JSON_PROPERTY_MODEL,
CreateChatCompletionRequest.JSON_PROPERTY_FREQUENCY_PENALTY,
CreateChatCompletionRequest.JSON_PROPERTY_LOGIT_BIAS,
CreateChatCompletionRequest.JSON_PROPERTY_LOGPROBS,
CreateChatCompletionRequest.JSON_PROPERTY_TOP_LOGPROBS,
CreateChatCompletionRequest.JSON_PROPERTY_MAX_TOKENS,
CreateChatCompletionRequest.JSON_PROPERTY_N,
CreateChatCompletionRequest.JSON_PROPERTY_PRESENCE_PENALTY,
CreateChatCompletionRequest.JSON_PROPERTY_RESPONSE_FORMAT,
CreateChatCompletionRequest.JSON_PROPERTY_SEED,
CreateChatCompletionRequest.JSON_PROPERTY_SERVICE_TIER,
CreateChatCompletionRequest.JSON_PROPERTY_STOP,
CreateChatCompletionRequest.JSON_PROPERTY_STREAM,
CreateChatCompletionRequest.JSON_PROPERTY_STREAM_OPTIONS,
CreateChatCompletionRequest.JSON_PROPERTY_TEMPERATURE,
CreateChatCompletionRequest.JSON_PROPERTY_TOP_P,
CreateChatCompletionRequest.JSON_PROPERTY_TOOLS,
CreateChatCompletionRequest.JSON_PROPERTY_TOOL_CHOICE,
CreateChatCompletionRequest.JSON_PROPERTY_PARALLEL_TOOL_CALLS,
CreateChatCompletionRequest.JSON_PROPERTY_USER,
CreateChatCompletionRequest.JSON_PROPERTY_FUNCTION_CALL,
CreateChatCompletionRequest.JSON_PROPERTY_FUNCTIONS
})
@jakarta.annotation.Generated(value = "org.openapitools.codegen.languages.JavaClientCodegen", comments = "Generator version: 7.7.0")
public class CreateChatCompletionRequest {
public static final String JSON_PROPERTY_MESSAGES = "messages";
private List<@Valid ChatCompletionRequestMessage> messages;
public static final String JSON_PROPERTY_MODEL = "model";
private String model;
public static final String JSON_PROPERTY_FREQUENCY_PENALTY = "frequency_penalty";
private BigDecimal frequencyPenalty = new BigDecimal("0");
public static final String JSON_PROPERTY_LOGIT_BIAS = "logit_bias";
private Map<String, Integer> logitBias;
public static final String JSON_PROPERTY_LOGPROBS = "logprobs";
private Boolean logprobs = false;
public static final String JSON_PROPERTY_TOP_LOGPROBS = "top_logprobs";
private Integer topLogprobs;
public static final String JSON_PROPERTY_MAX_TOKENS = "max_tokens";
private Integer maxTokens;
public static final String JSON_PROPERTY_N = "n";
private Integer n = 1;
public static final String JSON_PROPERTY_PRESENCE_PENALTY = "presence_penalty";
private BigDecimal presencePenalty = new BigDecimal("0");
public static final String JSON_PROPERTY_RESPONSE_FORMAT = "response_format";
private CreateChatCompletionRequestResponseFormat responseFormat;
public static final String JSON_PROPERTY_SEED = "seed";
private Integer seed;
/**
* Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service: - If set to 'auto', the system will utilize scale tier credits until they are exhausted. - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarantee. - When not set, the default behavior is 'auto'. When this parameter is set, the response body will include the `service_tier` utilized.
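*
* <p>Note that {@code fromValue(String)} below returns {@code null} for unrecognized
* values rather than throwing:
* <pre>{@code
* ServiceTierEnum tier = ServiceTierEnum.fromValue("auto"); // AUTO
* ServiceTierEnum bad  = ServiceTierEnum.fromValue("fast"); // null
* }</pre>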
*/
public enum ServiceTierEnum {
AUTO("auto"),
DEFAULT("default");
private String value;
ServiceTierEnum(String value) {
this.value = value;
}
@JsonValue
public String getValue() {
return value;
}
@Override
public String toString() {
return String.valueOf(value);
}
@JsonCreator
public static ServiceTierEnum fromValue(String value) {
for (ServiceTierEnum b : ServiceTierEnum.values()) {
if (b.value.equals(value)) {
return b;
}
}
return null;
}
}
public static final String JSON_PROPERTY_SERVICE_TIER = "service_tier";
private ServiceTierEnum serviceTier;
public static final String JSON_PROPERTY_STOP = "stop";
private CreateChatCompletionRequestStop stop = null;
public static final String JSON_PROPERTY_STREAM = "stream";
private Boolean stream = false;
public static final String JSON_PROPERTY_STREAM_OPTIONS = "stream_options";
private ChatCompletionStreamOptions streamOptions;
public static final String JSON_PROPERTY_TEMPERATURE = "temperature";
private BigDecimal temperature = new BigDecimal("1");
public static final String JSON_PROPERTY_TOP_P = "top_p";
private BigDecimal topP = new BigDecimal("1");
public static final String JSON_PROPERTY_TOOLS = "tools";
private List<@Valid ChatCompletionTool> tools;
public static final String JSON_PROPERTY_TOOL_CHOICE = "tool_choice";
private ChatCompletionToolChoiceOption toolChoice;
public static final String JSON_PROPERTY_PARALLEL_TOOL_CALLS = "parallel_tool_calls";
private Boolean parallelToolCalls = true;
public static final String JSON_PROPERTY_USER = "user";
private String user;
public static final String JSON_PROPERTY_FUNCTION_CALL = "function_call";
@Deprecated
private CreateChatCompletionRequestFunctionCall functionCall;
public static final String JSON_PROPERTY_FUNCTIONS = "functions";
@Deprecated
private List<@Valid ChatCompletionFunctions> functions;
public CreateChatCompletionRequest() {
}
public CreateChatCompletionRequest messages(List<@Valid ChatCompletionRequestMessage> messages) {
this.messages = messages;
return this;
}
public CreateChatCompletionRequest addMessagesItem(ChatCompletionRequestMessage messagesItem) {
if (this.messages == null) {
this.messages = new ArrayList<>();
}
this.messages.add(messagesItem);
return this;
}
/**
* A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models).
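*
* <p>A minimal sketch, assuming {@code request} is an instance of this class and
* {@code userMessage} is an already-built {@link ChatCompletionRequestMessage}:
* <pre>{@code
* request.addMessagesItem(userMessage); // appends to the conversation, creating the list if needed
* }</pre>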
* @return messages
*/
@jakarta.annotation.Nonnull
@NotNull
@Valid
@Size(min=1)
@JsonProperty(JSON_PROPERTY_MESSAGES)
@JsonInclude(value = JsonInclude.Include.ALWAYS)
public List<@Valid ChatCompletionRequestMessage> getMessages() {
return messages;
}
@JsonProperty(JSON_PROPERTY_MESSAGES)
@JsonInclude(value = JsonInclude.Include.ALWAYS)
public void setMessages(List<@Valid ChatCompletionRequestMessage> messages) {
this.messages = messages;
}
public CreateChatCompletionRequest model(String model) {
this.model = model;
return this;
}
/**
* ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API.
* @return model
*/
@jakarta.annotation.Nonnull
@NotNull
@JsonProperty(JSON_PROPERTY_MODEL)
@JsonInclude(value = JsonInclude.Include.ALWAYS)
public String getModel() {
return model;
}
@JsonProperty(JSON_PROPERTY_MODEL)
@JsonInclude(value = JsonInclude.Include.ALWAYS)
public void setModel(String model) {
this.model = model;
}
public CreateChatCompletionRequest frequencyPenalty(BigDecimal frequencyPenalty) {
this.frequencyPenalty = frequencyPenalty;
return this;
}
/**
* Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)
* minimum: -2
* maximum: 2
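*
* <p>A sketch; note the generated model takes {@link BigDecimal}, not {@code double}:
* <pre>{@code
* request.frequencyPenalty(new BigDecimal("0.5")); // mildly discourage verbatim repetition
* }</pre>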
* @return frequencyPenalty
*/
@jakarta.annotation.Nullable
@Valid
@DecimalMin("-2") @DecimalMax("2")
@JsonProperty(JSON_PROPERTY_FREQUENCY_PENALTY)
@JsonInclude(value = JsonInclude.Include.USE_DEFAULTS)
public BigDecimal getFrequencyPenalty() {
return frequencyPenalty;
}
@JsonProperty(JSON_PROPERTY_FREQUENCY_PENALTY)
@JsonInclude(value = JsonInclude.Include.USE_DEFAULTS)
public void setFrequencyPenalty(BigDecimal frequencyPenalty) {
this.frequencyPenalty = frequencyPenalty;
}
public CreateChatCompletionRequest logitBias(Map<String, Integer> logitBias) {
this.logitBias = logitBias;
return this;
}
public CreateChatCompletionRequest putLogitBiasItem(String key, Integer logitBiasItem) {
if (this.logitBias == null) {
this.logitBias = new HashMap<>();
}
this.logitBias.put(key, logitBiasItem);
return this;
}
/**
* Modify the likelihood of specified tokens appearing in the completion. Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
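*
* <p>Illustrative sketch only; the token id {@code "50256"} is hypothetical and
* tokenizer-specific:
* <pre>{@code
* request.putLogitBiasItem("50256", -100); // effectively ban this token
* }</pre>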
* @return logitBias
*/
@jakarta.annotation.Nullable
@JsonProperty(JSON_PROPERTY_LOGIT_BIAS)
@JsonInclude(value = JsonInclude.Include.USE_DEFAULTS)
public Map<String, Integer> getLogitBias() {
return logitBias;
}
@JsonProperty(JSON_PROPERTY_LOGIT_BIAS)
@JsonInclude(value = JsonInclude.Include.USE_DEFAULTS)
public void setLogitBias(Map<String, Integer> logitBias) {
this.logitBias = logitBias;
}
public CreateChatCompletionRequest logprobs(Boolean logprobs) {
this.logprobs = logprobs;
return this;
}
/**
* Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`.
* @return logprobs
*/
@jakarta.annotation.Nullable
@JsonProperty(JSON_PROPERTY_LOGPROBS)
@JsonInclude(value = JsonInclude.Include.USE_DEFAULTS)
public Boolean getLogprobs() {
return logprobs;
}
@JsonProperty(JSON_PROPERTY_LOGPROBS)
@JsonInclude(value = JsonInclude.Include.USE_DEFAULTS)
public void setLogprobs(Boolean logprobs) {
this.logprobs = logprobs;
}
public CreateChatCompletionRequest topLogprobs(Integer topLogprobs) {
this.topLogprobs = topLogprobs;
return this;
}
/**
* An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used.
* minimum: 0
* maximum: 20
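*
* <p>A sketch of the required pairing:
* <pre>{@code
* request.logprobs(true)  // must be true when top_logprobs is set
*        .topLogprobs(5);
* }</pre>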
* @return topLogprobs
*/
@jakarta.annotation.Nullable
@Min(0) @Max(20)
@JsonProperty(JSON_PROPERTY_TOP_LOGPROBS)
@JsonInclude(value = JsonInclude.Include.USE_DEFAULTS)
public Integer getTopLogprobs() {
return topLogprobs;
}
@JsonProperty(JSON_PROPERTY_TOP_LOGPROBS)
@JsonInclude(value = JsonInclude.Include.USE_DEFAULTS)
public void setTopLogprobs(Integer topLogprobs) {
this.topLogprobs = topLogprobs;
}
public CreateChatCompletionRequest maxTokens(Integer maxTokens) {
this.maxTokens = maxTokens;
return this;
}
/**
* The maximum number of [tokens](/tokenizer) that can be generated in the chat completion. The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens.
* @return maxTokens
*/
@jakarta.annotation.Nullable
@JsonProperty(JSON_PROPERTY_MAX_TOKENS)
@JsonInclude(value = JsonInclude.Include.USE_DEFAULTS)
public Integer getMaxTokens() {
return maxTokens;
}
@JsonProperty(JSON_PROPERTY_MAX_TOKENS)
@JsonInclude(value = JsonInclude.Include.USE_DEFAULTS)
public void setMaxTokens(Integer maxTokens) {
this.maxTokens = maxTokens;
}
public CreateChatCompletionRequest n(Integer n) {
this.n = n;
return this;
}
/**
* How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs.
* minimum: 1
* maximum: 128
* @return n
*/
@jakarta.annotation.Nullable
@Min(1) @Max(128)
@JsonProperty(JSON_PROPERTY_N)
@JsonInclude(value = JsonInclude.Include.USE_DEFAULTS)
public Integer getN() {
return n;
}
@JsonProperty(JSON_PROPERTY_N)
@JsonInclude(value = JsonInclude.Include.USE_DEFAULTS)
public void setN(Integer n) {
this.n = n;
}
public CreateChatCompletionRequest presencePenalty(BigDecimal presencePenalty) {
this.presencePenalty = presencePenalty;
return this;
}
/**
* Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)
* minimum: -2
* maximum: 2
* @return presencePenalty
*/
@jakarta.annotation.Nullable
@Valid
@DecimalMin("-2") @DecimalMax("2")
@JsonProperty(JSON_PROPERTY_PRESENCE_PENALTY)
@JsonInclude(value = JsonInclude.Include.USE_DEFAULTS)
public BigDecimal getPresencePenalty() {
return presencePenalty;
}
@JsonProperty(JSON_PROPERTY_PRESENCE_PENALTY)
@JsonInclude(value = JsonInclude.Include.USE_DEFAULTS)
public void setPresencePenalty(BigDecimal presencePenalty) {
this.presencePenalty = presencePenalty;
}
public CreateChatCompletionRequest responseFormat(CreateChatCompletionRequestResponseFormat responseFormat) {
this.responseFormat = responseFormat;
return this;
}
/**
* Get responseFormat
* @return responseFormat
*/
@jakarta.annotation.Nullable
@Valid
@JsonProperty(JSON_PROPERTY_RESPONSE_FORMAT)
@JsonInclude(value = JsonInclude.Include.USE_DEFAULTS)
public CreateChatCompletionRequestResponseFormat getResponseFormat() {
return responseFormat;
}
@JsonProperty(JSON_PROPERTY_RESPONSE_FORMAT)
@JsonInclude(value = JsonInclude.Include.USE_DEFAULTS)
public void setResponseFormat(CreateChatCompletionRequestResponseFormat responseFormat) {
this.responseFormat = responseFormat;
}
public CreateChatCompletionRequest seed(Integer seed) {
this.seed = seed;
return this;
}
/**
* This feature is in Beta. If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend.
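*
* <p>Sketch: fix the seed, and keep all other parameters identical, for best-effort
* reproducibility:
* <pre>{@code
* request.seed(12345); // same seed + same parameters should yield the same result
* }</pre>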
* @return seed
*/
@jakarta.annotation.Nullable
@JsonProperty(JSON_PROPERTY_SEED)
@JsonInclude(value = JsonInclude.Include.USE_DEFAULTS)
public Integer getSeed() {
return seed;
}
@JsonProperty(JSON_PROPERTY_SEED)
@JsonInclude(value = JsonInclude.Include.USE_DEFAULTS)
public void setSeed(Integer seed) {
this.seed = seed;
}
public CreateChatCompletionRequest serviceTier(ServiceTierEnum serviceTier) {
this.serviceTier = serviceTier;
return this;
}
/**
* Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service: - If set to 'auto', the system will utilize scale tier credits until they are exhausted. - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarantee. - When not set, the default behavior is 'auto'. When this parameter is set, the response body will include the `service_tier` utilized.
* @return serviceTier
*/
@jakarta.annotation.Nullable
@JsonProperty(JSON_PROPERTY_SERVICE_TIER)
@JsonInclude(value = JsonInclude.Include.USE_DEFAULTS)
public ServiceTierEnum getServiceTier() {
return serviceTier;
}
@JsonProperty(JSON_PROPERTY_SERVICE_TIER)
@JsonInclude(value = JsonInclude.Include.USE_DEFAULTS)
public void setServiceTier(ServiceTierEnum serviceTier) {
this.serviceTier = serviceTier;
}
public CreateChatCompletionRequest stop(CreateChatCompletionRequestStop stop) {
this.stop = stop;
return this;
}
/**
* Get stop
* @return stop
*/
@jakarta.annotation.Nullable
@Valid
@JsonProperty(JSON_PROPERTY_STOP)
@JsonInclude(value = JsonInclude.Include.USE_DEFAULTS)
public CreateChatCompletionRequestStop getStop() {
return stop;
}
@JsonProperty(JSON_PROPERTY_STOP)
@JsonInclude(value = JsonInclude.Include.USE_DEFAULTS)
public void setStop(CreateChatCompletionRequestStop stop) {
this.stop = stop;
}
public CreateChatCompletionRequest stream(Boolean stream) {
this.stream = stream;
return this;
}
/**
* If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).
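*
* <p>A minimal sketch:
* <pre>{@code
* request.stream(true); // deltas arrive as SSE "data:" events, ending with "data: [DONE]"
* }</pre>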
* @return stream
*/
@jakarta.annotation.Nullable
@JsonProperty(JSON_PROPERTY_STREAM)
@JsonInclude(value = JsonInclude.Include.USE_DEFAULTS)
public Boolean getStream() {
return stream;
}
@JsonProperty(JSON_PROPERTY_STREAM)
@JsonInclude(value = JsonInclude.Include.USE_DEFAULTS)
public void setStream(Boolean stream) {
this.stream = stream;
}
public CreateChatCompletionRequest streamOptions(ChatCompletionStreamOptions streamOptions) {
this.streamOptions = streamOptions;
return this;
}
/**
* Get streamOptions
* @return streamOptions
*/
@jakarta.annotation.Nullable
@Valid
@JsonProperty(JSON_PROPERTY_STREAM_OPTIONS)
@JsonInclude(value = JsonInclude.Include.USE_DEFAULTS)
public ChatCompletionStreamOptions getStreamOptions() {
return streamOptions;
}
@JsonProperty(JSON_PROPERTY_STREAM_OPTIONS)
@JsonInclude(value = JsonInclude.Include.USE_DEFAULTS)
public void setStreamOptions(ChatCompletionStreamOptions streamOptions) {
this.streamOptions = streamOptions;
}
public CreateChatCompletionRequest temperature(BigDecimal temperature) {
this.temperature = temperature;
return this;
}
/**
* What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.
* minimum: 0
* maximum: 2
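*
* <p>Sketch: tune this or {@code top_p}, not both:
* <pre>{@code
* request.temperature(new BigDecimal("0.2")); // more focused and deterministic
* }</pre>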
* @return temperature
*/
@jakarta.annotation.Nullable
@Valid
@DecimalMin("0") @DecimalMax("2")
@JsonProperty(JSON_PROPERTY_TEMPERATURE)
@JsonInclude(value = JsonInclude.Include.USE_DEFAULTS)
public BigDecimal getTemperature() {
return temperature;
}
@JsonProperty(JSON_PROPERTY_TEMPERATURE)
@JsonInclude(value = JsonInclude.Include.USE_DEFAULTS)
public void setTemperature(BigDecimal temperature) {
this.temperature = temperature;
}
public CreateChatCompletionRequest topP(BigDecimal topP) {
this.topP = topP;
return this;
}
/**
* An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
* minimum: 0
* maximum: 1
* @return topP
*/
@jakarta.annotation.Nullable
@Valid
@DecimalMin("0") @DecimalMax("1")
@JsonProperty(JSON_PROPERTY_TOP_P)
@JsonInclude(value = JsonInclude.Include.USE_DEFAULTS)
public BigDecimal getTopP() {
return topP;
}
@JsonProperty(JSON_PROPERTY_TOP_P)
@JsonInclude(value = JsonInclude.Include.USE_DEFAULTS)
public void setTopP(BigDecimal topP) {
this.topP = topP;
}
public CreateChatCompletionRequest tools(List<@Valid ChatCompletionTool> tools) {
this.tools = tools;
return this;
}
public CreateChatCompletionRequest addToolsItem(ChatCompletionTool toolsItem) {
if (this.tools == null) {
this.tools = new ArrayList<>();
}
this.tools.add(toolsItem);
return this;
}
/**
* A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are supported.
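*
* <p>A sketch, assuming {@code tool} and {@code choice} are prebuilt
* {@link ChatCompletionTool} and {@link ChatCompletionToolChoiceOption} instances:
* <pre>{@code
* request.addToolsItem(tool)
*        .toolChoice(choice);
* }</pre>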
* @return tools
*/
@jakarta.annotation.Nullable
@Valid
@JsonProperty(JSON_PROPERTY_TOOLS)
@JsonInclude(value = JsonInclude.Include.USE_DEFAULTS)
public List<@Valid ChatCompletionTool> getTools() {
return tools;
}
@JsonProperty(JSON_PROPERTY_TOOLS)
@JsonInclude(value = JsonInclude.Include.USE_DEFAULTS)
public void setTools(List<@Valid ChatCompletionTool> tools) {
this.tools = tools;
}
public CreateChatCompletionRequest toolChoice(ChatCompletionToolChoiceOption toolChoice) {
this.toolChoice = toolChoice;
return this;
}
/**
* Get toolChoice
* @return toolChoice
*/
@jakarta.annotation.Nullable
@Valid
@JsonProperty(JSON_PROPERTY_TOOL_CHOICE)
@JsonInclude(value = JsonInclude.Include.USE_DEFAULTS)
public ChatCompletionToolChoiceOption getToolChoice() {
return toolChoice;
}
@JsonProperty(JSON_PROPERTY_TOOL_CHOICE)
@JsonInclude(value = JsonInclude.Include.USE_DEFAULTS)
public void setToolChoice(ChatCompletionToolChoiceOption toolChoice) {
this.toolChoice = toolChoice;
}
public CreateChatCompletionRequest parallelToolCalls(Boolean parallelToolCalls) {
this.parallelToolCalls = parallelToolCalls;
return this;
}
/**
* Whether to enable [parallel function calling](/docs/guides/function-calling/parallel-function-calling) during tool use.
* @return parallelToolCalls
*/
@jakarta.annotation.Nullable
@JsonProperty(JSON_PROPERTY_PARALLEL_TOOL_CALLS)
@JsonInclude(value = JsonInclude.Include.USE_DEFAULTS)
public Boolean getParallelToolCalls() {
return parallelToolCalls;
}
@JsonProperty(JSON_PROPERTY_PARALLEL_TOOL_CALLS)
@JsonInclude(value = JsonInclude.Include.USE_DEFAULTS)
public void setParallelToolCalls(Boolean parallelToolCalls) {
this.parallelToolCalls = parallelToolCalls;
}
public CreateChatCompletionRequest user(String user) {
this.user = user;
return this;
}
/**
* A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
* @return user
*/
@jakarta.annotation.Nullable
@JsonProperty(JSON_PROPERTY_USER)
@JsonInclude(value = JsonInclude.Include.USE_DEFAULTS)
public String getUser() {
return user;
}
@JsonProperty(JSON_PROPERTY_USER)
@JsonInclude(value = JsonInclude.Include.USE_DEFAULTS)
public void setUser(String user) {
this.user = user;
}
@Deprecated
public CreateChatCompletionRequest functionCall(CreateChatCompletionRequestFunctionCall functionCall) {
this.functionCall = functionCall;
return this;
}
/**
* Get functionCall
* @return functionCall
* @deprecated
*/
@Deprecated
@jakarta.annotation.Nullable
@Valid
@JsonProperty(JSON_PROPERTY_FUNCTION_CALL)
@JsonInclude(value = JsonInclude.Include.USE_DEFAULTS)
public CreateChatCompletionRequestFunctionCall getFunctionCall() {
return functionCall;
}
@Deprecated
@JsonProperty(JSON_PROPERTY_FUNCTION_CALL)
@JsonInclude(value = JsonInclude.Include.USE_DEFAULTS)
public void setFunctionCall(CreateChatCompletionRequestFunctionCall functionCall) {
this.functionCall = functionCall;
}
@Deprecated
public CreateChatCompletionRequest functions(List<@Valid ChatCompletionFunctions> functions) {
this.functions = functions;
return this;
}
public CreateChatCompletionRequest addFunctionsItem(ChatCompletionFunctions functionsItem) {
if (this.functions == null) {
this.functions = new ArrayList<>();
}
this.functions.add(functionsItem);
return this;
}
/**
* Deprecated in favor of `tools`. A list of functions the model may generate JSON inputs for.
* @return functions
* @deprecated
*/
@Deprecated
@jakarta.annotation.Nullable
@Valid
@Size(min=1,max=128)
@JsonProperty(JSON_PROPERTY_FUNCTIONS)
@JsonInclude(value = JsonInclude.Include.USE_DEFAULTS)
public List<@Valid ChatCompletionFunctions> getFunctions() {
return functions;
}
@Deprecated
@JsonProperty(JSON_PROPERTY_FUNCTIONS)
@JsonInclude(value = JsonInclude.Include.USE_DEFAULTS)
public void setFunctions(List<@Valid ChatCompletionFunctions> functions) {
this.functions = functions;
}
/**
* Return true if this CreateChatCompletionRequest object is equal to o.
*/
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
CreateChatCompletionRequest createChatCompletionRequest = (CreateChatCompletionRequest) o;
return Objects.equals(this.messages, createChatCompletionRequest.messages) &&
Objects.equals(this.model, createChatCompletionRequest.model) &&
Objects.equals(this.frequencyPenalty, createChatCompletionRequest.frequencyPenalty) &&
Objects.equals(this.logitBias, createChatCompletionRequest.logitBias) &&
Objects.equals(this.logprobs, createChatCompletionRequest.logprobs) &&
Objects.equals(this.topLogprobs, createChatCompletionRequest.topLogprobs) &&
Objects.equals(this.maxTokens, createChatCompletionRequest.maxTokens) &&
Objects.equals(this.n, createChatCompletionRequest.n) &&
Objects.equals(this.presencePenalty, createChatCompletionRequest.presencePenalty) &&
Objects.equals(this.responseFormat, createChatCompletionRequest.responseFormat) &&
Objects.equals(this.seed, createChatCompletionRequest.seed) &&
Objects.equals(this.serviceTier, createChatCompletionRequest.serviceTier) &&
Objects.equals(this.stop, createChatCompletionRequest.stop) &&
Objects.equals(this.stream, createChatCompletionRequest.stream) &&
Objects.equals(this.streamOptions, createChatCompletionRequest.streamOptions) &&
Objects.equals(this.temperature, createChatCompletionRequest.temperature) &&
Objects.equals(this.topP, createChatCompletionRequest.topP) &&
Objects.equals(this.tools, createChatCompletionRequest.tools) &&
Objects.equals(this.toolChoice, createChatCompletionRequest.toolChoice) &&
Objects.equals(this.parallelToolCalls, createChatCompletionRequest.parallelToolCalls) &&
Objects.equals(this.user, createChatCompletionRequest.user) &&
Objects.equals(this.functionCall, createChatCompletionRequest.functionCall) &&
Objects.equals(this.functions, createChatCompletionRequest.functions);
}
@Override
public int hashCode() {
return Objects.hash(messages, model, frequencyPenalty, logitBias, logprobs, topLogprobs, maxTokens, n, presencePenalty, responseFormat, seed, serviceTier, stop, stream, streamOptions, temperature, topP, tools, toolChoice, parallelToolCalls, user, functionCall, functions);
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("class CreateChatCompletionRequest {\n");
sb.append(" messages: ").append(toIndentedString(messages)).append("\n");
sb.append(" model: ").append(toIndentedString(model)).append("\n");
sb.append(" frequencyPenalty: ").append(toIndentedString(frequencyPenalty)).append("\n");
sb.append(" logitBias: ").append(toIndentedString(logitBias)).append("\n");
sb.append(" logprobs: ").append(toIndentedString(logprobs)).append("\n");
sb.append(" topLogprobs: ").append(toIndentedString(topLogprobs)).append("\n");
sb.append(" maxTokens: ").append(toIndentedString(maxTokens)).append("\n");
sb.append(" n: ").append(toIndentedString(n)).append("\n");
sb.append(" presencePenalty: ").append(toIndentedString(presencePenalty)).append("\n");
sb.append(" responseFormat: ").append(toIndentedString(responseFormat)).append("\n");
sb.append(" seed: ").append(toIndentedString(seed)).append("\n");
sb.append(" serviceTier: ").append(toIndentedString(serviceTier)).append("\n");
sb.append(" stop: ").append(toIndentedString(stop)).append("\n");
sb.append(" stream: ").append(toIndentedString(stream)).append("\n");
sb.append(" streamOptions: ").append(toIndentedString(streamOptions)).append("\n");
sb.append(" temperature: ").append(toIndentedString(temperature)).append("\n");
sb.append(" topP: ").append(toIndentedString(topP)).append("\n");
sb.append(" tools: ").append(toIndentedString(tools)).append("\n");
sb.append(" toolChoice: ").append(toIndentedString(toolChoice)).append("\n");
sb.append(" parallelToolCalls: ").append(toIndentedString(parallelToolCalls)).append("\n");
sb.append(" user: ").append(toIndentedString(user)).append("\n");
sb.append(" functionCall: ").append(toIndentedString(functionCall)).append("\n");
sb.append(" functions: ").append(toIndentedString(functions)).append("\n");
sb.append("}");
return sb.toString();
}
/**
* Convert the given object to string with each line indented by 4 spaces
* (except the first line).
*/
private String toIndentedString(Object o) {
if (o == null) {
return "null";
}
return o.toString().replace("\n", "\n    ");
}
}