io.kestra.plugin.openai.ChatCompletion

Interact with OpenAI from Kestra.
package io.kestra.plugin.openai;
import com.theokanning.openai.Usage;
import com.theokanning.openai.completion.chat.*;
import com.theokanning.openai.service.OpenAiService;
import io.kestra.core.models.annotations.Example;
import io.kestra.core.models.annotations.Plugin;
import io.kestra.core.models.executions.metrics.Counter;
import io.kestra.core.models.property.Property;
import io.kestra.core.models.tasks.RunnableTask;
import io.kestra.core.runners.RunContext;
import io.swagger.v3.oas.annotations.media.Schema;
import lombok.*;
import lombok.experimental.SuperBuilder;
import jakarta.validation.constraints.NotNull;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
@SuperBuilder
@ToString
@EqualsAndHashCode
@Getter
@NoArgsConstructor
@Schema(
    title = "Given a prompt, get a response from an LLM using OpenAI's [Chat Completions API](https://platform.openai.com/docs/api-reference/chat/create).",
    description = "For more information, refer to the [Chat Completions API docs](https://platform.openai.com/docs/guides/gpt/chat-completions-api)."
)
@Plugin(
    examples = {
        @Example(
            title = "Based on a prompt input, generate a completion response and pass it to a downstream task.",
            full = true,
            code = """
                id: openai
                namespace: company.team

                inputs:
                  - id: prompt
                    type: STRING
                    defaults: What is data orchestration?

                tasks:
                  - id: completion
                    type: io.kestra.plugin.openai.ChatCompletion
                    apiKey: "yourOpenAIapiKey"
                    model: gpt-4o
                    prompt: "{{ inputs.prompt }}"

                  - id: response
                    type: io.kestra.plugin.core.debug.Return
                    format: "{{ outputs.completion.choices[0].message.content }}"
                """
        ),
        @Example(
            title = "Based on a prompt input, ask OpenAI to call a function that determines whether you need to " +
                "respond to a customer's review immediately or wait until later, and then comes up with a " +
                "suggested response.",
            full = true,
            code = """
                id: openai
                namespace: company.team

                inputs:
                  - id: prompt
                    type: STRING
                    defaults: I love your product and would purchase it again!

                tasks:
                  - id: prioritize_response
                    type: io.kestra.plugin.openai.ChatCompletion
                    apiKey: "yourOpenAIapiKey"
                    model: gpt-4o
                    messages:
                      - role: user
                        content: "{{ inputs.prompt }}"
                    functions:
                      - name: respond_to_review
                        description: Given the customer product review provided as input, determines how urgently a reply is required and then provides suggested response text.
                        parameters:
                          - name: response_urgency
                            type: string
                            description: >
                              How urgently this customer review needs a reply. Bad reviews
                              must be addressed immediately before anyone sees them. Good
                              reviews can wait until later.
                            required: true
                            enumValues:
                              - reply_immediately
                              - reply_later
                          - name: response_text
                            type: string
                            description: The text to post online in response to this review.
                            required: true

                  - id: response_urgency
                    type: io.kestra.plugin.core.debug.Return
                    format: "{{ outputs.prioritize_response.choices[0].message.function_call.arguments.response_urgency }}"

                  - id: response_text
                    type: io.kestra.plugin.core.debug.Return
                    format: "{{ outputs.prioritize_response.choices[0].message.function_call.arguments.response_text }}"
                """
        )
    }
)
public class ChatCompletion extends AbstractTask implements RunnableTask<ChatCompletion.Output> {
    @Schema(
        title = "A list of messages comprising the conversation so far.",
        description = "Required if `prompt` is not set."
    )
    private Property<List<ChatMessage>> messages;

    @Schema(
        title = "The function call(s) the API can use when generating completions."
    )
    private Property<List<PluginChatFunction>> functions;

    @Schema(
        title = "The name of the function OpenAI should generate a call for.",
        description = "Enter a specific function name, or 'auto' to let the model decide. The default is auto."
    )
    private Property<String> functionCall;

    @Schema(
        title = "The prompt(s) to generate completions for. By default, this prompt will be sent as a `user` role.",
        description = "If not provided, make sure to set the `messages` property."
    )
    private Property<String> prompt;

    @Schema(
        title = "What sampling temperature to use, between 0 and 2. Defaults to 1."
    )
    private Property<Double> temperature;

    @Schema(
        title = "An alternative to sampling with temperature, where the model considers the results of the tokens with `top_p` probability mass. Defaults to 1."
    )
    private Property<Double> topP;

    @Schema(
        title = "How many chat completion choices to generate for each input message. Defaults to 1."
    )
    private Property<Integer> n;

    @Schema(
        title = "Up to 4 sequences where the API will stop generating further tokens. Defaults to null."
    )
    private Property<List<String>> stop;

    @Schema(
        title = "The maximum number of tokens to generate in the chat completion. No limit is set by default."
    )
    private Property<Integer> maxTokens;

    @Schema(
        title = "Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far. Defaults to 0."
    )
    private Property<Double> presencePenalty;

    @Schema(
        title = "Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far. Defaults to 0."
    )
    private Property<Double> frequencyPenalty;
    @Schema(
        title = "Modify the likelihood of specified tokens appearing in the completion. Defaults to null."
    )
    private Property<Map<String, Integer>> logitBias;
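The listing is truncated at this point by the hosting page. Purely as a hedged sketch, the snippet below shows how such a task's run() method could render these properties and forward them to the imported theokanning OpenAiService. The Output type, the model and apiKey fields (presumably inherited from AbstractTask or defined in the truncated remainder), and the exact Property rendering calls are assumptions based on common Kestra plugin patterns, not the plugin's actual implementation.

    // Hedged sketch only — not the plugin's actual code. Assumes Kestra's
    // Property rendering API (runContext.render(...).as(...)) and that the
    // `apiKey` and `model` fields live on AbstractTask or the truncated part.
    @Override
    public Output run(RunContext runContext) throws Exception {
        OpenAiService service = new OpenAiService(
            runContext.render(this.apiKey).as(String.class).orElseThrow()
        );

        // Per the `prompt` schema above, a bare prompt is sent as a single user-role message.
        List<ChatMessage> chatMessages = new ArrayList<>();
        runContext.render(this.prompt).as(String.class)
            .ifPresent(p -> chatMessages.add(new ChatMessage(ChatMessageRole.USER.value(), p)));

        ChatCompletionRequest request = ChatCompletionRequest.builder()
            .model(runContext.render(this.model).as(String.class).orElseThrow())
            .messages(chatMessages)
            .temperature(runContext.render(this.temperature).as(Double.class).orElse(null))
            .maxTokens(runContext.render(this.maxTokens).as(Integer.class).orElse(null))
            .build();

        ChatCompletionResult result = service.createChatCompletion(request);

        // The Usage and Counter imports at the top suggest token usage is reported as metrics.
        Usage usage = result.getUsage();
        runContext.metric(Counter.of("usage.total_tokens", (double) usage.getTotalTokens()));

        return Output.builder() // Output type assumed; not visible in the truncated listing
            .choices(result.getChoices())
            .build();
    }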