com.google.cloud.dialogflow.v2.InferenceParameterOrBuilder

/*
 * Copyright 2024 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: google/cloud/dialogflow/v2/generator.proto

// Protobuf Java Version: 3.25.5
package com.google.cloud.dialogflow.v2;

public interface InferenceParameterOrBuilder
    extends
    // @@protoc_insertion_point(interface_extends:google.cloud.dialogflow.v2.InferenceParameter)
    com.google.protobuf.MessageOrBuilder {

  /**
   *
   *
   * <pre>
   * Optional. Maximum number of output tokens for the generator.
   * </pre>
   *
   * <code>optional int32 max_output_tokens = 1 [(.google.api.field_behavior) = OPTIONAL];</code>
   *
   * @return Whether the maxOutputTokens field is set.
   */
  boolean hasMaxOutputTokens();

  /**
   *
   *
   * <pre>
   * Optional. Maximum number of output tokens for the generator.
   * </pre>
   *
   * <code>optional int32 max_output_tokens = 1 [(.google.api.field_behavior) = OPTIONAL];</code>
   *
   * @return The maxOutputTokens.
   */
  int getMaxOutputTokens();

  /**
   *
   *
   * <pre>
   * Optional. Controls the randomness of LLM predictions.
   * Low temperature = less random. High temperature = more random.
   * If unset (or 0), uses a default value of 0.
   * </pre>
   *
   * <code>optional double temperature = 2 [(.google.api.field_behavior) = OPTIONAL];</code>
   *
   * @return Whether the temperature field is set.
   */
  boolean hasTemperature();

  /**
   *
   *
   * <pre>
   * Optional. Controls the randomness of LLM predictions.
   * Low temperature = less random. High temperature = more random.
   * If unset (or 0), uses a default value of 0.
   * </pre>
   *
   * <code>optional double temperature = 2 [(.google.api.field_behavior) = OPTIONAL];</code>
   *
   * @return The temperature.
   */
  double getTemperature();

  /**
   *
   *
   * <pre>
   * Optional. Top-k changes how the model selects tokens for output. A top-k of
   * 1 means the selected token is the most probable among all tokens in the
   * model's vocabulary (also called greedy decoding), while a top-k of 3 means
   * that the next token is selected from among the 3 most probable tokens
   * (using temperature). For each token selection step, the top K tokens with
   * the highest probabilities are sampled. Then tokens are further filtered
   * based on topP with the final token selected using temperature sampling.
   * Specify a lower value for less random responses and a higher value for more
   * random responses. Acceptable values are in [1, 40]; defaults to 40.
   * </pre>
   *
   * <code>optional int32 top_k = 3 [(.google.api.field_behavior) = OPTIONAL];</code>
   *
   * @return Whether the topK field is set.
   */
  boolean hasTopK();

  /**
   *
   *
   * <pre>
   * Optional. Top-k changes how the model selects tokens for output. A top-k of
   * 1 means the selected token is the most probable among all tokens in the
   * model's vocabulary (also called greedy decoding), while a top-k of 3 means
   * that the next token is selected from among the 3 most probable tokens
   * (using temperature). For each token selection step, the top K tokens with
   * the highest probabilities are sampled. Then tokens are further filtered
   * based on topP with the final token selected using temperature sampling.
   * Specify a lower value for less random responses and a higher value for more
   * random responses. Acceptable values are in [1, 40]; defaults to 40.
   * </pre>
   *
   * <code>optional int32 top_k = 3 [(.google.api.field_behavior) = OPTIONAL];</code>
   *
   * @return The topK.
   */
  int getTopK();

  /**
   *
   *
   * <pre>
   * Optional. Top-p changes how the model selects tokens for output. Tokens are
   * selected from the most probable (among the top K; see the topK parameter)
   * to the least probable, until the sum of their probabilities equals the
   * top-p value. For example, if tokens A, B, and C have probabilities of
   * 0.3, 0.2, and 0.1 and the top-p value is 0.5,
   * then the model will select either A or B as the next token (using
   * temperature) and doesn't consider C. The default top-p value is 0.95.
   * Specify a lower value for less random responses and a higher value for more
   * random responses. Acceptable values are in [0.0, 1.0]; defaults to 0.95.
   * </pre>
   *
   * <code>optional double top_p = 4 [(.google.api.field_behavior) = OPTIONAL];</code>
   *
   * @return Whether the topP field is set.
   */
  boolean hasTopP();

  /**
   *
   *
   * <pre>
   * Optional. Top-p changes how the model selects tokens for output. Tokens are
   * selected from the most probable (among the top K; see the topK parameter)
   * to the least probable, until the sum of their probabilities equals the
   * top-p value. For example, if tokens A, B, and C have probabilities of
   * 0.3, 0.2, and 0.1 and the top-p value is 0.5,
   * then the model will select either A or B as the next token (using
   * temperature) and doesn't consider C. The default top-p value is 0.95.
   * Specify a lower value for less random responses and a higher value for more
   * random responses. Acceptable values are in [0.0, 1.0]; defaults to 0.95.
   * </pre>
   *
   * <code>optional double top_p = 4 [(.google.api.field_behavior) = OPTIONAL];</code>
   *
   * @return The topP.
   */
  double getTopP();
}
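For context, a minimal usage sketch follows. It assumes only the standard builder API that protoc generates alongside this interface (InferenceParameter.newBuilder() plus the set/has/get accessors documented above); the class name InferenceParameterExample is illustrative, not part of the library.

import com.google.cloud.dialogflow.v2.InferenceParameter;
import com.google.cloud.dialogflow.v2.InferenceParameterOrBuilder;

public class InferenceParameterExample {
  public static void main(String[] args) {
    // Build an InferenceParameter; the setters follow the standard
    // protoc Java mapping of the proto fields documented above.
    InferenceParameter params =
        InferenceParameter.newBuilder()
            .setMaxOutputTokens(256) // cap the generator's output length
            .setTemperature(0.2)     // low temperature = less random
            .setTopK(20)             // sample among the 20 most probable tokens
            .setTopP(0.9)            // nucleus (top-p) cutoff
            .build();

    // Read the message back through this OrBuilder interface. Because the
    // proto fields are declared `optional`, each one has a has...() accessor.
    InferenceParameterOrBuilder view = params;
    if (view.hasTemperature()) {
      System.out.println("temperature = " + view.getTemperature());
    }
    System.out.println("topK = " + view.getTopK() + ", topP = " + view.getTopP());
  }
}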
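The top_k and top_p comments above describe a two-stage filter: keep at most the top K tokens, then keep the most probable of those until their cumulative probability reaches top-p, and finally pick one of the survivors via temperature sampling. The following toy sketch illustrates only the filtering step under those stated rules; it is not Dialogflow's actual decoder.

import java.util.LinkedHashMap;
import java.util.Map;

public class TopKTopPSketch {
  // Keep at most topK tokens, stopping once the cumulative probability of
  // the kept tokens reaches topP. `probs` must be ordered from most to
  // least probable.
  static Map<String, Double> filter(Map<String, Double> probs, int topK, double topP) {
    Map<String, Double> kept = new LinkedHashMap<>();
    double cumulative = 0.0;
    for (Map.Entry<String, Double> e : probs.entrySet()) {
      if (kept.size() == topK || cumulative >= topP) {
        break;
      }
      kept.put(e.getKey(), e.getValue());
      cumulative += e.getValue();
    }
    return kept;
  }

  public static void main(String[] args) {
    // The worked example from the top_p comment: A=0.3, B=0.2, C=0.1 with a
    // top-p of 0.5 keeps A and B; C is never considered.
    Map<String, Double> probs = new LinkedHashMap<>();
    probs.put("A", 0.3);
    probs.put("B", 0.2);
    probs.put("C", 0.1);
    System.out.println(filter(probs, 40, 0.5)); // prints {A=0.3, B=0.2}
  }
}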



