// Copyright 2024 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package google.cloud.dialogflow.cx.v3beta1;

import "google/api/annotations.proto";
import "google/api/client.proto";
import "google/api/field_behavior.proto";
import "google/api/resource.proto";
import "google/cloud/dialogflow/cx/v3beta1/advanced_settings.proto";
import "google/cloud/dialogflow/cx/v3beta1/audio_config.proto";
import "google/cloud/dialogflow/cx/v3beta1/data_store_connection.proto";
import "google/cloud/dialogflow/cx/v3beta1/example.proto";
import "google/cloud/dialogflow/cx/v3beta1/flow.proto";
import "google/cloud/dialogflow/cx/v3beta1/generative_settings.proto";
import "google/cloud/dialogflow/cx/v3beta1/intent.proto";
import "google/cloud/dialogflow/cx/v3beta1/page.proto";
import "google/cloud/dialogflow/cx/v3beta1/response_message.proto";
import "google/cloud/dialogflow/cx/v3beta1/session_entity_type.proto";
import "google/cloud/dialogflow/cx/v3beta1/tool_call.proto";
import "google/protobuf/duration.proto";
import "google/protobuf/field_mask.proto";
import "google/protobuf/struct.proto";
import "google/rpc/status.proto";
import "google/type/latlng.proto";

option cc_enable_arenas = true;
option csharp_namespace = "Google.Cloud.Dialogflow.Cx.V3Beta1";
option go_package = "cloud.google.com/go/dialogflow/cx/apiv3beta1/cxpb;cxpb";
option java_multiple_files = true;
option java_outer_classname = "SessionProto";
option java_package = "com.google.cloud.dialogflow.cx.v3beta1";
option objc_class_prefix = "DF";
option ruby_package = "Google::Cloud::Dialogflow::CX::V3beta1";
option (google.api.resource_definition) = {
  type: "dialogflow.googleapis.com/Session"
  pattern: "projects/{project}/locations/{location}/agents/{agent}/sessions/{session}"
  pattern: "projects/{project}/locations/{location}/agents/{agent}/environments/{environment}/sessions/{session}"
};
option (google.api.resource_definition) = {
  type: "discoveryengine.googleapis.com/DataStore"
  pattern: "projects/{project}/locations/{location}/dataStores/{data_store}"
  pattern: "projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}"
};

// A session represents an interaction with a user. You retrieve user input
// and pass it to the
// [DetectIntent][google.cloud.dialogflow.cx.v3beta1.Sessions.DetectIntent]
// method to determine user intent and respond.
service Sessions {
  option (google.api.default_host) = "dialogflow.googleapis.com";
  option (google.api.oauth_scopes) =
      "https://www.googleapis.com/auth/cloud-platform,"
      "https://www.googleapis.com/auth/dialogflow";

  // Processes a natural language query and returns structured, actionable data
  // as a result. This method is not idempotent, because it may cause session
  // entity types to be updated, which in turn might affect results of future
  // queries.
  //
  // Note: Always use agent versions for production traffic.
  // See [Versions and
  // environments](https://cloud.google.com/dialogflow/cx/docs/concept/version).
  rpc DetectIntent(DetectIntentRequest) returns (DetectIntentResponse) {
    option (google.api.http) = {
      post: "/v3beta1/{session=projects/*/locations/*/agents/*/sessions/*}:detectIntent"
      body: "*"
      additional_bindings {
        post: "/v3beta1/{session=projects/*/locations/*/agents/*/environments/*/sessions/*}:detectIntent"
        body: "*"
      }
    };
  }

  // Processes a natural language query and returns structured, actionable data
  // as a result through server-side streaming. Server-side streaming allows
  // Dialogflow to send [partial
  // responses](https://cloud.google.com/dialogflow/cx/docs/concept/fulfillment#partial-response)
  // earlier in a single request.
  rpc ServerStreamingDetectIntent(DetectIntentRequest)
      returns (stream DetectIntentResponse) {
    option (google.api.http) = {
      post: "/v3beta1/{session=projects/*/locations/*/agents/*/sessions/*}:serverStreamingDetectIntent"
      body: "*"
      additional_bindings {
        post: "/v3beta1/{session=projects/*/locations/*/agents/*/environments/*/sessions/*}:serverStreamingDetectIntent"
        body: "*"
      }
    };
  }

  // Processes a natural language query in audio format in a streaming fashion
  // and returns structured, actionable data as a result. This method is only
  // available via the gRPC API (not REST).
  //
  // Note: Always use agent versions for production traffic.
  // See [Versions and
  // environments](https://cloud.google.com/dialogflow/cx/docs/concept/version).
  rpc StreamingDetectIntent(stream StreamingDetectIntentRequest)
      returns (stream StreamingDetectIntentResponse) {}

  // Returns preliminary intent match results without changing the session
  // status.
  rpc MatchIntent(MatchIntentRequest) returns (MatchIntentResponse) {
    option (google.api.http) = {
      post: "/v3beta1/{session=projects/*/locations/*/agents/*/sessions/*}:matchIntent"
      body: "*"
      additional_bindings {
        post: "/v3beta1/{session=projects/*/locations/*/agents/*/environments/*/sessions/*}:matchIntent"
        body: "*"
      }
    };
  }

  // Fulfills a matched intent returned by
  // [MatchIntent][google.cloud.dialogflow.cx.v3beta1.Sessions.MatchIntent].
  // Must be called after
  // [MatchIntent][google.cloud.dialogflow.cx.v3beta1.Sessions.MatchIntent],
  // with input from
  // [MatchIntentResponse][google.cloud.dialogflow.cx.v3beta1.MatchIntentResponse].
  // Otherwise, the behavior is undefined.
  rpc FulfillIntent(FulfillIntentRequest) returns (FulfillIntentResponse) {
    option (google.api.http) = {
      post: "/v3beta1/{match_intent_request.session=projects/*/locations/*/agents/*/sessions/*}:fulfillIntent"
      body: "*"
      additional_bindings {
        post: "/v3beta1/{match_intent_request.session=projects/*/locations/*/agents/*/environments/*/sessions/*}:fulfillIntent"
        body: "*"
      }
    };
  }

  // Updates the feedback received from the user for a single turn of the bot
  // response.
  rpc SubmitAnswerFeedback(SubmitAnswerFeedbackRequest)
      returns (AnswerFeedback) {
    option (google.api.http) = {
      post: "/v3beta1/{session=projects/*/locations/*/agents/*/sessions/*}:submitAnswerFeedback"
      body: "*"
    };
  }
}

// Stores information about feedback provided by users about a response.
message AnswerFeedback {
  // Represents a thumbs up/down rating provided by the user about a response.
  enum Rating {
    // Rating not specified.
    RATING_UNSPECIFIED = 0;

    // Thumbs up feedback from user.
    THUMBS_UP = 1;

    // Thumbs down feedback from user.
    THUMBS_DOWN = 2;
  }

  // Stores extra information about why users provided a thumbs-down rating.
  message RatingReason {
    // Optional. Custom reason labels for a thumbs-down rating provided by the
    // user. The maximum number of labels allowed is 10 and the maximum length
    // of a single label is 128 characters.
    repeated string reason_labels = 3 [(google.api.field_behavior) = OPTIONAL];

    // Optional. Additional feedback about the rating.
    // This field can be populated without choosing a predefined `reason`.
    string feedback = 2 [(google.api.field_behavior) = OPTIONAL];
  }

  // Optional. Rating from the user for the specific Dialogflow response.
  Rating rating = 1 [(google.api.field_behavior) = OPTIONAL];

  // Optional. When a thumbs-down rating is provided, users can optionally
  // provide context about the rating.
  RatingReason rating_reason = 2 [(google.api.field_behavior) = OPTIONAL];

  // Optional. Custom rating from the user about the provided answer, with a
  // maximum length of 1024 characters. For example, the client could use a
  // customized JSON object to indicate the rating.
  string custom_rating = 3 [(google.api.field_behavior) = OPTIONAL];
}

// The request to set the feedback for a bot answer.
message SubmitAnswerFeedbackRequest {
  // Required. The name of the session the feedback was sent to.
  string session = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      type: "dialogflow.googleapis.com/Session"
    }
  ];

  // Required. ID of the response whose feedback is being updated. This is the
  // same as
  // DetectIntentResponse.response_id.
  string response_id = 2 [(google.api.field_behavior) = REQUIRED];

  // Required. Feedback provided for a bot answer.
  AnswerFeedback answer_feedback = 3 [(google.api.field_behavior) = REQUIRED];

  // Optional. The mask to control which fields to update. If the mask is not
  // present, all fields will be updated.
  google.protobuf.FieldMask update_mask = 4
      [(google.api.field_behavior) = OPTIONAL];
}
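
// Example (an illustrative sketch, not part of the official schema docs): a
// SubmitAnswerFeedbackRequest carrying a thumbs-down rating, written in the
// proto3 JSON mapping with placeholder resource IDs:
//
// ```json
// {
//   "session": "projects/my-project/locations/global/agents/my-agent/sessions/session-1",
//   "responseId": "response-id-from-detect-intent",
//   "answerFeedback": {
//     "rating": "THUMBS_DOWN",
//     "ratingReason": {
//       "reasonLabels": ["Incomplete answer"],
//       "feedback": "The answer did not cover weekend hours."
//     }
//   }
// }
// ```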

// The request to detect user's intent.
message DetectIntentRequest {
  // Required. The name of the session this query is sent to.
  // Format:
  // `projects/<ProjectID>/locations/<LocationID>/agents/<AgentID>/sessions/<SessionID>`
  // or
  // `projects/<ProjectID>/locations/<LocationID>/agents/<AgentID>/environments/<EnvironmentID>/sessions/<SessionID>`.
  // If `Environment ID` is not specified, we assume the default 'draft'
  // environment. It's up to the API caller to choose an appropriate `Session
  // ID`. It can be a random number or some type of session identifier
  // (preferably hashed). The length of the `Session ID` must not exceed 36
  // characters.
  //
  // For more information, see the [sessions
  // guide](https://cloud.google.com/dialogflow/cx/docs/concept/session).
  //
  // Note: Always use agent versions for production traffic.
  // See [Versions and
  // environments](https://cloud.google.com/dialogflow/cx/docs/concept/version).
  string session = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      type: "dialogflow.googleapis.com/Session"
    }
  ];

  // The parameters of this query.
  QueryParameters query_params = 2;

  // Required. The input specification.
  QueryInput query_input = 3 [(google.api.field_behavior) = REQUIRED];

  // Instructs the speech synthesizer how to generate the output audio.
  OutputAudioConfig output_audio_config = 4;
}
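
// Example (an illustrative sketch): a minimal text-based DetectIntentRequest
// in the proto3 JSON mapping, using placeholder resource IDs:
//
// ```json
// {
//   "session": "projects/my-project/locations/global/agents/my-agent/sessions/session-1",
//   "queryInput": {
//     "text": { "text": "I want to book a flight" },
//     "languageCode": "en"
//   }
// }
// ```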

// The message returned from the DetectIntent method.
message DetectIntentResponse {
  // Represents different DetectIntentResponse types.
  enum ResponseType {
    // Not specified. This should never happen.
    RESPONSE_TYPE_UNSPECIFIED = 0;

    // Partial response. For example, aggregated responses in a Fulfillment that
    // enables `return_partial_response` can be returned as partial responses.
    // WARNING: partial response is not eligible for barge-in.
    PARTIAL = 1;

    // Final response.
    FINAL = 2;
  }

  // Output only. The unique identifier of the response. It can be used to
  // locate a response in the training example set or for reporting issues.
  string response_id = 1;

  // The result of the conversational query.
  QueryResult query_result = 2;

  // The audio data bytes encoded as specified in the request.
  // Note: The output audio is generated based on the values of default platform
  // text responses found in the
  // [`query_result.response_messages`][google.cloud.dialogflow.cx.v3beta1.QueryResult.response_messages]
  // field. If multiple default text responses exist, they will be concatenated
  // when generating audio. If no default platform text responses exist, the
  // generated audio content will be empty.
  //
  // In some scenarios, multiple output audio fields may be present in the
  // response structure. In these cases, only the top-most-level audio output
  // has content.
  bytes output_audio = 4;

  // The config used by the speech synthesizer to generate the output audio.
  OutputAudioConfig output_audio_config = 5;

  // Response type.
  ResponseType response_type = 6;

  // Indicates whether the partial response can be cancelled when a later
  // response arrives. For example, if the agent specified some music as a
  // partial response, it can be cancelled.
  bool allow_cancellation = 7;
}
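
// Example (an illustrative sketch): the shape of a final DetectIntentResponse
// in the proto3 JSON mapping, with the query result abbreviated:
//
// ```json
// {
//   "responseId": "00000000-0000-0000-0000-000000000000",
//   "queryResult": {
//     "text": "I want to book a flight",
//     "languageCode": "en",
//     "responseMessages": [
//       { "text": { "text": ["Where would you like to fly to?"] } }
//     ]
//   },
//   "responseType": "FINAL"
// }
// ```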

// The top-level message sent by the client to the
// [Sessions.StreamingDetectIntent][google.cloud.dialogflow.cx.v3beta1.Sessions.StreamingDetectIntent]
// method.
//
// Multiple request messages should be sent in order:
//
// 1.  The first message must contain
//     [session][google.cloud.dialogflow.cx.v3beta1.StreamingDetectIntentRequest.session],
//     [query_input][google.cloud.dialogflow.cx.v3beta1.StreamingDetectIntentRequest.query_input]
//     plus optionally
//     [query_params][google.cloud.dialogflow.cx.v3beta1.StreamingDetectIntentRequest.query_params].
//     If the client wants to receive an audio response, it should also contain
//     [output_audio_config][google.cloud.dialogflow.cx.v3beta1.StreamingDetectIntentRequest.output_audio_config].
//
// 2.  If
// [query_input][google.cloud.dialogflow.cx.v3beta1.StreamingDetectIntentRequest.query_input]
// was set to
//     [query_input.audio.config][google.cloud.dialogflow.cx.v3beta1.AudioInput.config],
//     all subsequent messages must contain
//     [query_input.audio.audio][google.cloud.dialogflow.cx.v3beta1.AudioInput.audio]
//     to continue with Speech recognition. If you instead decide to detect an
//     intent from text input after you have already started Speech recognition,
//     send a message with
//     [query_input.text][google.cloud.dialogflow.cx.v3beta1.QueryInput.text].
//
//     However, note that:
//
//     * Dialogflow will bill you for the audio duration so far.
//     * Dialogflow discards all Speech recognition results in favor of the
//       input text.
//     * Dialogflow will use the language code from the first message.
//
// After you have sent all input, you must half-close or abort the request
// stream.
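//
// A minimal sketch of the audio request sequence (illustrative placeholder
// values; in the proto3 JSON mapping, audio bytes are base64-encoded):
//
// ```
// 1: {"session": "projects/my-project/locations/global/agents/my-agent/sessions/s1",
//     "queryInput": {"audio": {"config": {"audioEncoding": "AUDIO_ENCODING_LINEAR_16",
//                                         "sampleRateHertz": 16000}},
//                    "languageCode": "en"}}
// 2: {"queryInput": {"audio": {"audio": "<base64 audio chunk>"}}}
// 3: {"queryInput": {"audio": {"audio": "<base64 audio chunk>"}}}
// ...then half-close the stream and read responses.
// ```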
message StreamingDetectIntentRequest {
  // The name of the session this query is sent to.
  // Format:
  // `projects/<ProjectID>/locations/<LocationID>/agents/<AgentID>/sessions/<SessionID>`
  // or
  // `projects/<ProjectID>/locations/<LocationID>/agents/<AgentID>/environments/<EnvironmentID>/sessions/<SessionID>`.
  // If `Environment ID` is not specified, we assume the default 'draft'
  // environment.
  // It's up to the API caller to choose an appropriate `Session ID`. It can be
  // a random number or some type of session identifier (preferably hashed).
  // The length of the `Session ID` must not exceed 36 characters.
  //  Note: session must be set in the first request.
  //
  //  For more information, see the [sessions
  //  guide](https://cloud.google.com/dialogflow/cx/docs/concept/session).
  //
  //  Note: Always use agent versions for production traffic.
  //  See [Versions and
  //  environments](https://cloud.google.com/dialogflow/cx/docs/concept/version).
  string session = 1 [(google.api.resource_reference) = {
    type: "dialogflow.googleapis.com/Session"
  }];

  // The parameters of this query.
  QueryParameters query_params = 2;

  // Required. The input specification.
  QueryInput query_input = 3 [(google.api.field_behavior) = REQUIRED];

  // Instructs the speech synthesizer how to generate the output audio.
  OutputAudioConfig output_audio_config = 4;

  // Enable partial detect intent response. If this flag is not enabled, the
  // response stream still contains only one final `DetectIntentResponse` even
  // if some `Fulfillment`s in the agent have been configured to return partial
  // responses.
  bool enable_partial_response = 5;

  // If true, `StreamingDetectIntentResponse.debugging_info` will get populated.
  bool enable_debugging_info = 8;
}

// Cloud conversation info for easier debugging.
// It will get populated in `StreamingDetectIntentResponse` or
// `StreamingAnalyzeContentResponse` when the flag `enable_debugging_info` is
// set to true in corresponding requests.
message CloudConversationDebuggingInfo {
  // Number of input audio data chunks in streaming requests.
  int32 audio_data_chunks = 1;

  // Time offset of the end of speech utterance relative to the
  // beginning of the first audio chunk.
  google.protobuf.Duration result_end_time_offset = 2;

  // Duration of first audio chunk.
  google.protobuf.Duration first_audio_duration = 3;

  // Whether client used single utterance mode.
  bool single_utterance = 5;

  // Time offsets of the speech partial results relative to the beginning of
  // the stream.
  repeated google.protobuf.Duration speech_partial_results_end_times = 6;

  // Time offsets of the speech final results (is_final=true) relative to the
  // beginning of the stream.
  repeated google.protobuf.Duration speech_final_results_end_times = 7;

  // Total number of partial responses.
  int32 partial_responses = 8;

  // Time offset of Speaker ID stream close time relative to the Speech stream
  // close time in milliseconds. Only meaningful for conversations involving
  // passive verification.
  int32 speaker_id_passive_latency_ms_offset = 9;

  // Whether a barge-in event is triggered in this request.
  bool bargein_event_triggered = 10;

  // Whether speech uses single utterance mode.
  bool speech_single_utterance = 11;

  // Time offsets of the DTMF partial results relative to the beginning of
  // the stream.
  repeated google.protobuf.Duration dtmf_partial_results_times = 12;

  // Time offsets of the DTMF final results relative to the beginning of
  // the stream.
  repeated google.protobuf.Duration dtmf_final_results_times = 13;

  // Time offset of the end-of-single-utterance signal relative to the
  // beginning of the stream.
  google.protobuf.Duration single_utterance_end_time_offset = 14;

  // No speech timeout settings for the stream.
  google.protobuf.Duration no_speech_timeout = 15;

  // Speech endpointing timeout settings for the stream.
  google.protobuf.Duration endpointing_timeout = 19;

  // Whether the streaming terminates with an injected text query.
  bool is_input_text = 16;

  // Client half close time in terms of input audio duration.
  google.protobuf.Duration client_half_close_time_offset = 17;

  // Client half close time in terms of API streaming duration.
  google.protobuf.Duration client_half_close_streaming_time_offset = 18;
}

// The top-level message returned from the
// [StreamingDetectIntent][google.cloud.dialogflow.cx.v3beta1.Sessions.StreamingDetectIntent]
// method.
//
// Multiple response messages (N) can be returned in order.
//
// The first (N-1) responses set either the `recognition_result` or
// `detect_intent_response` field, depending on the request:
//
// *   If the `StreamingDetectIntentRequest.query_input.audio` field was
//     set, and the `StreamingDetectIntentRequest.enable_partial_response`
//     field was false, the `recognition_result` field is populated for each
//     of the (N-1) responses.
//     See the
//     [StreamingRecognitionResult][google.cloud.dialogflow.cx.v3beta1.StreamingRecognitionResult]
//     message for details about the result message sequence.
//
// *   If the `StreamingDetectIntentRequest.enable_partial_response` field was
//     true, the `detect_intent_response` field is populated for each
//     of the (N-1) responses, where 1 <= N <= 4.
//     These responses set the
//     [DetectIntentResponse.response_type][google.cloud.dialogflow.cx.v3beta1.DetectIntentResponse.response_type]
//     field to `PARTIAL`.
//
// For the final Nth response message, the `detect_intent_response` is fully
// populated, and
// [DetectIntentResponse.response_type][google.cloud.dialogflow.cx.v3beta1.DetectIntentResponse.response_type]
// is set to `FINAL`.
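//
// For example (an illustrative sketch of one possible sequence for audio input
// without partial responses, in the proto3 JSON mapping):
//
// ```
// 1: {"recognitionResult": {"messageType": "TRANSCRIPT",
//                           "transcript": "book a", "isFinal": false}}
// 2: {"recognitionResult": {"messageType": "TRANSCRIPT",
//                           "transcript": "book a flight", "isFinal": true}}
// 3: {"detectIntentResponse": {"responseId": "...",
//                              "queryResult": {...}, "responseType": "FINAL"}}
// ```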
message StreamingDetectIntentResponse {
  // The output response.
  oneof response {
    // The result of speech recognition.
    StreamingRecognitionResult recognition_result = 1;

    // The response from detect intent.
    DetectIntentResponse detect_intent_response = 2;
  }

  // Debugging info that would get populated when
  // `StreamingDetectIntentRequest.enable_debugging_info` is set to true.
  CloudConversationDebuggingInfo debugging_info = 4;
}

// Contains a speech recognition result corresponding to a portion of the audio
// that is currently being processed or an indication that this is the end
// of the single requested utterance.
//
// While end-user audio is being processed, Dialogflow sends a series of
// results. Each result may contain a `transcript` value. A transcript
// represents a portion of the utterance. While the recognizer is processing
// audio, transcript values may be interim values or finalized values.
// Once a transcript is finalized, the `is_final` value is set to true and
// processing continues for the next transcript.
//
// If `StreamingDetectIntentRequest.query_input.audio.config.single_utterance`
// was true, and the recognizer has completed processing audio,
// the `message_type` value is set to `END_OF_SINGLE_UTTERANCE` and the
// following (last) result contains the last finalized transcript.
//
// The complete end-user utterance is determined by concatenating the
// finalized transcript values received for the series of results.
//
// In the following example, single utterance is enabled. In the case where
// single utterance is not enabled, result 7 would not occur.
//
// ```
// Num | transcript              | message_type            | is_final
// --- | ----------------------- | ----------------------- | --------
// 1   | "tube"                  | TRANSCRIPT              | false
// 2   | "to be a"               | TRANSCRIPT              | false
// 3   | "to be"                 | TRANSCRIPT              | false
// 4   | "to be or not to be"    | TRANSCRIPT              | true
// 5   | "that's"                | TRANSCRIPT              | false
// 6   | "that is"               | TRANSCRIPT              | false
// 7   | unset                   | END_OF_SINGLE_UTTERANCE | unset
// 8   | " that is the question" | TRANSCRIPT              | true
// ```
//
// Concatenating the finalized transcripts with `is_final` set to true,
// the complete utterance becomes "to be or not to be that is the question".
message StreamingRecognitionResult {
  // Type of the response message.
  enum MessageType {
    // Not specified. Should never be used.
    MESSAGE_TYPE_UNSPECIFIED = 0;

    // Message contains a (possibly partial) transcript.
    TRANSCRIPT = 1;

    // This event indicates that the server has detected the end of the user's
    // speech utterance and expects no additional speech. Therefore, the server
    // will not process additional audio (although it may subsequently return
    // additional results). The client should stop sending additional audio
    // data, half-close the gRPC connection, and wait for any additional results
    // until the server closes the gRPC connection. This message is only sent if
    // [`single_utterance`][google.cloud.dialogflow.cx.v3beta1.InputAudioConfig.single_utterance]
    // was set to `true`, and is not used otherwise.
    END_OF_SINGLE_UTTERANCE = 2;
  }

  // Type of the result message.
  MessageType message_type = 1;

  // Transcript text representing the words that the user spoke.
  // Populated if and only if `message_type` = `TRANSCRIPT`.
  string transcript = 2;

  // If `false`, the `StreamingRecognitionResult` represents an
  // interim result that may change. If `true`, the recognizer will not return
  // any further hypotheses about this piece of the audio. May only be populated
  // for `message_type` = `TRANSCRIPT`.
  bool is_final = 3;

  // The Speech confidence between 0.0 and 1.0 for the current portion of audio.
  // A higher number indicates an estimated greater likelihood that the
  // recognized words are correct. The default of 0.0 is a sentinel value
  // indicating that confidence was not set.
  //
  // This field is typically only provided if `is_final` is true and you should
  // not rely on it being accurate or even set.
  float confidence = 4;

  // An estimate of the likelihood that the speech recognizer will
  // not change its guess about this interim recognition result:
  // * If the value is unspecified or 0.0, Dialogflow didn't compute the
  //   stability. In particular, Dialogflow will only provide stability for
  //   `TRANSCRIPT` results with `is_final = false`.
  // * Otherwise, the value is in (0.0, 1.0] where 0.0 means completely
  //   unstable and 1.0 means completely stable.
  float stability = 6;

  // Word-specific information for the words recognized by Speech in
  // [transcript][google.cloud.dialogflow.cx.v3beta1.StreamingRecognitionResult.transcript].
  // Populated if and only if `message_type` = `TRANSCRIPT` and
  // [InputAudioConfig.enable_word_info] is set.
  repeated SpeechWordInfo speech_word_info = 7;

  // Time offset of the end of this Speech recognition result relative to the
  // beginning of the audio. Only populated for `message_type` =
  // `TRANSCRIPT`.
  google.protobuf.Duration speech_end_offset = 8;

  // Detected language code for the transcript.
  string language_code = 10;
}

// Represents the parameters of a conversational query.
message QueryParameters {
  // The time zone of this conversational query from the [time zone
  // database](https://www.iana.org/time-zones), e.g., America/New_York,
  // Europe/Paris. If not provided, the time zone specified in the agent is
  // used.
  string time_zone = 1;

  // The geo location of this conversational query.
  google.type.LatLng geo_location = 2;

  // Additional session entity types to replace or extend developer entity types
  // with. The entity synonyms apply to all languages and persist for the
  // session of this query.
  repeated SessionEntityType session_entity_types = 3;

  // This field can be used to pass custom data into the webhook associated with
  // the agent. Arbitrary JSON objects are supported.
  // Some integrations that query a Dialogflow agent may provide additional
  // information in the payload.
  // In particular, for the Dialogflow Phone Gateway integration, this field has
  // the form:
  // ```
  // {
  //  "telephony": {
  //    "caller_id": "+18558363987"
  //  }
  // }
  // ```
  google.protobuf.Struct payload = 4;

  // Additional parameters to be put into [session
  // parameters][SessionInfo.parameters]. To remove a
  // parameter from the session, clients should explicitly set the parameter
  // value to null.
  //
  // You can reference the session parameters in the agent with the following
  // format: $session.params.parameter-id.
  //
  // Depending on your protocol or client library language, this is a
  // map, associative array, symbol table, dictionary, or JSON object
  // composed of a collection of (MapKey, MapValue) pairs:
  //
  // * MapKey type: string
  // * MapKey value: parameter name
  // * MapValue type: If parameter's entity type is a composite entity then use
  // map, otherwise, depending on the parameter value type, it could be one of
  // string, number, boolean, null, list or map.
  // * MapValue value: If parameter's entity type is a composite entity then use
  // map from composite entity property names to property values, otherwise,
  // use parameter value.
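  //
  // For example (an illustrative sketch), setting one scalar parameter and one
  // composite-entity parameter:
  //
  // ```json
  // {
  //   "order-number": "12345",
  //   "delivery-address": { "city": "Zurich", "zip-code": "8002" }
  // }
  // ```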
  google.protobuf.Struct parameters = 5;

  // The unique identifier of the
  // [page][google.cloud.dialogflow.cx.v3beta1.Page] to override the [current
  // page][QueryResult.current_page] in the session.
  // Format:
  // `projects/<ProjectID>/locations/<LocationID>/agents/<AgentID>/flows/<FlowID>/pages/<PageID>`.
  //
  //  If `current_page` is specified, the previous state of the session will be
  //  ignored by Dialogflow, including the [previous
  //  page][QueryResult.current_page] and the [previous session
  //  parameters][QueryResult.parameters].
  //  In most cases,
  //  [current_page][google.cloud.dialogflow.cx.v3beta1.QueryParameters.current_page]
  //  and
  //  [parameters][google.cloud.dialogflow.cx.v3beta1.QueryParameters.parameters]
  //  should be configured together to direct a session to a specific state.
  string current_page = 6 [
    (google.api.resource_reference) = { type: "dialogflow.googleapis.com/Page" }
  ];

  // Whether to disable webhook calls for this request.
  bool disable_webhook = 7;

  // Configures whether sentiment analysis should be performed. If not
  // provided, sentiment analysis is not performed.
  bool analyze_query_text_sentiment = 8;

  // This field can be used to pass HTTP headers for a webhook
  // call. These headers will be sent to the webhook along with the headers that
  // have been configured through the Dialogflow web console. The headers
  // defined within this field will overwrite the headers configured through the
  // Dialogflow console if there is a conflict. Header names are
  // case-insensitive. Google's specified headers are not allowed, including
  // "Host", "Content-Length", "Connection", "From", "User-Agent",
  // "Accept-Encoding", "If-Modified-Since", "If-None-Match", "X-Forwarded-For",
  // etc.
  map<string, string> webhook_headers = 10;

  // A list of flow versions to override for the request.
  // Format:
  // `projects/<ProjectID>/locations/<LocationID>/agents/<AgentID>/flows/<FlowID>/versions/<VersionID>`.
  //
  //  If version 1 of flow X is included in this list, the traffic of
  //  flow X will go through version 1 regardless of the version configuration
  //  in the environment. Each flow can have at most one version specified in
  //  this list.
  repeated string flow_versions = 14 [(google.api.resource_reference) = {
    type: "dialogflow.googleapis.com/Version"
  }];

  // Optional. Start the session with the specified
  // [playbook][google.cloud.dialogflow.cx.v3beta1.Playbook]. You can only
  // specify the playbook at the beginning of the session. Otherwise, an error
  // will be thrown.
  //
  // Format:
  // `projects/<ProjectID>/locations/<LocationID>/agents/<AgentID>/playbooks/<PlaybookID>`.
  string current_playbook = 19 [
    (google.api.field_behavior) = OPTIONAL,
    (google.api.resource_reference) = {
      type: "dialogflow.googleapis.com/Playbook"
    }
  ];

  // Optional. Use the specified LLM model settings for processing the request.
  LlmModelSettings llm_model_settings = 21
      [(google.api.field_behavior) = OPTIONAL];

  // The channel which this query is for.
  //
  // If specified, only the
  // [ResponseMessage][google.cloud.dialogflow.cx.v3beta1.ResponseMessage]
  // associated with the channel will be returned. If no
  // [ResponseMessage][google.cloud.dialogflow.cx.v3beta1.ResponseMessage] is
  // associated with the channel, it falls back to the
  // [ResponseMessage][google.cloud.dialogflow.cx.v3beta1.ResponseMessage] with
  // unspecified channel.
  //
  // If unspecified, the
  // [ResponseMessage][google.cloud.dialogflow.cx.v3beta1.ResponseMessage] with
  // unspecified channel will be returned.
  string channel = 15;

  // Optional. Configure lifetime of the Dialogflow session.
  // By default, a Dialogflow session remains active and its data is stored for
  // 30 minutes after the last request is sent for the session.
  // This value should be no longer than 1 day.
  google.protobuf.Duration session_ttl = 16
      [(google.api.field_behavior) = OPTIONAL];

  // Optional. Information about the end-user to improve the relevance and
  // accuracy of generative answers.
  //
  // This will be interpreted and used by a language model, so, for good
  // results, the data should be self-descriptive, and in a simple structure.
  //
  // Example:
  //
  // ```json
  // {
  //   "subscription plan": "Business Premium Plus",
  //   "devices owned": [
  //     {"model": "Google Pixel 7"},
  //     {"model": "Google Pixel Tablet"}
  //   ]
  // }
  // ```
  google.protobuf.Struct end_user_metadata = 18
      [(google.api.field_behavior) = OPTIONAL];

  // Optional. Search configuration for UCS search queries.
  SearchConfig search_config = 20 [(google.api.field_behavior) = OPTIONAL];

  // Optional. If set to true and data stores are involved in serving the
  // request, then
  // DetectIntentResponse.query_result.data_store_connection_signals
  // will be filled with data that can help evaluations.
  bool populate_data_store_connection_signals = 25
      [(google.api.field_behavior) = OPTIONAL];
}

// Search configuration for UCS search queries.
message SearchConfig {
  // Optional. Boosting configuration for the datastores.
  repeated BoostSpecs boost_specs = 1 [(google.api.field_behavior) = OPTIONAL];

  // Optional. Filter configuration for the datastores.
  repeated FilterSpecs filter_specs = 2
      [(google.api.field_behavior) = OPTIONAL];
}

// Boost specification to boost certain documents.
// A copy of google.cloud.discoveryengine.v1main.BoostSpec, field documentation
// is available at
// https://cloud.google.com/generative-ai-app-builder/docs/reference/rest/v1alpha/BoostSpec
message BoostSpec {
  // Boost applies to documents which match a condition.
  message ConditionBoostSpec {
    // Specification for custom ranking based on customer specified attribute
    // value. It provides more controls for customized ranking than the simple
    // (condition, boost) combination above.
    message BoostControlSpec {
      // The attribute (or function) for which the custom ranking is to be
      // applied.
      enum AttributeType {
        // Unspecified AttributeType.
        ATTRIBUTE_TYPE_UNSPECIFIED = 0;

        // The value of the numerical field will be used to dynamically update
        // the boost amount. In this case, the attribute_value (the x value)
        // of the control point will be the actual value of the numerical
        // field for which the boost_amount is specified.
        NUMERICAL = 1;

        // For the freshness use case the attribute value will be the duration
        // between the current time and the date in the datetime field
        // specified. The value must be formatted as an XSD `dayTimeDuration`
        // value (a restricted subset of an ISO 8601 duration value). The
        // pattern for this is: `[nD][T[nH][nM][nS]]`.
        // E.g. `5D`, `3DT12H30M`, `T24H`.
        FRESHNESS = 2;
      }

      // The interpolation type to be applied. Default will be linear
      // (Piecewise Linear).
      enum InterpolationType {
        // Interpolation type is unspecified. In this case, it defaults to
        // Linear.
        INTERPOLATION_TYPE_UNSPECIFIED = 0;

        // Piecewise linear interpolation will be applied.
        LINEAR = 1;
      }

      // The control points used to define the curve. The curve defined
      // through these control points can only be monotonically increasing
      // or decreasing (constant values are acceptable).
      message ControlPoint {
        // Optional. Can be one of:
        // 1. The numerical field value.
        // 2. The duration spec for freshness:
        // The value must be formatted as an XSD `dayTimeDuration` value (a
        // restricted subset of an ISO 8601 duration value). The pattern for
        // this is: `[nD][T[nH][nM][nS]]`.
        string attribute_value = 1 [(google.api.field_behavior) = OPTIONAL];

        // Optional. The value between -1 and 1 by which to boost the score if
        // the attribute_value evaluates to the value specified above.
        float boost_amount = 2 [(google.api.field_behavior) = OPTIONAL];
      }

      // Optional. The name of the field whose value will be used to determine
      // the boost amount.
      string field_name = 1 [(google.api.field_behavior) = OPTIONAL];

      // Optional. The attribute type to be used to determine the boost amount.
      // The attribute value can be derived from the field value of the
      // specified field_name. In the case of numerical it is straightforward
      // i.e. attribute_value = numerical_field_value. In the case of freshness
      // however, attribute_value = (time.now() - datetime_field_value).
      AttributeType attribute_type = 2 [(google.api.field_behavior) = OPTIONAL];

      // Optional. The interpolation type to be applied to connect the control
      // points listed below.
      InterpolationType interpolation_type = 3
          [(google.api.field_behavior) = OPTIONAL];

      // Optional. The control points used to define the curve. The monotonic
      // function (defined through the interpolation_type above) passes through
      // the control points listed here.
      repeated ControlPoint control_points = 4
          [(google.api.field_behavior) = OPTIONAL];
    }

    // Optional. An expression which specifies a boost condition. The syntax and
    // supported fields are the same as a filter expression.
    // Examples:
    //
    // * To boost documents with document ID "doc_1" or "doc_2", and
    // color
    //   "Red" or "Blue":
    //     * (id: ANY("doc_1", "doc_2")) AND (color: ANY("Red","Blue"))
    string condition = 1 [(google.api.field_behavior) = OPTIONAL];

    // Optional. Strength of the condition boost, which should be in [-1, 1].
    // Negative boost means demotion. Default is 0.0.
    //
    // Setting to 1.0 gives the document a big promotion. However, it does not
    // necessarily mean that the boosted document will be the top result at
    // all times, nor that other documents will be excluded. Results could
    // still be shown even when none of them matches the condition. And
    // results that are significantly more relevant to the search query can
    // still trump your heavily favored but irrelevant documents.
    //
    // Setting to -1.0 gives the document a big demotion. However, results
    // that are deeply relevant might still be shown. The document will have
    // an uphill battle to get a fairly high ranking, but it is not blocked
    // out completely.
    //
    // Setting to 0.0 means no boost applied. The boosting condition is
    // ignored.
    float boost = 2 [(google.api.field_behavior) = OPTIONAL];

    // Optional. Complex specification for custom ranking based on customer
    // defined attribute value.
    BoostControlSpec boost_control_spec = 4
        [(google.api.field_behavior) = OPTIONAL];
  }

  // Optional. Condition boost specifications. If a document matches multiple
  // conditions in the specifications, boost scores from these specifications
  // are all applied and combined in a non-linear way. The maximum number of
  // specifications is 20.
  repeated ConditionBoostSpec condition_boost_specs = 1
      [(google.api.field_behavior) = OPTIONAL];
}
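
// Example (an illustrative sketch): a BoostSpec that promotes one category of
// documents and adds a freshness-based custom ranking, in the proto3 JSON
// mapping:
//
// ```json
// {
//   "conditionBoostSpecs": [
//     { "condition": "(category: ANY(\"faq\"))", "boost": 0.5 },
//     {
//       "condition": "(category: ANY(\"news\"))",
//       "boostControlSpec": {
//         "fieldName": "publish_date",
//         "attributeType": "FRESHNESS",
//         "interpolationType": "LINEAR",
//         "controlPoints": [
//           { "attributeValue": "7D", "boostAmount": 0.8 },
//           { "attributeValue": "30D", "boostAmount": 0.0 }
//         ]
//       }
//     }
//   ]
// }
// ```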

// Boost specifications for data stores.
message BoostSpecs {
  // Optional. Data Stores where the boosting configuration is applied. The full
  // names of the referenced data stores. Formats:
  // `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}`
  // `projects/{project}/locations/{location}/dataStores/{data_store}`
  repeated string data_stores = 1 [
    (google.api.field_behavior) = OPTIONAL,
    (google.api.resource_reference) = {
      type: "discoveryengine.googleapis.com/DataStore"
    }
  ];

  // Optional. A list of boosting specifications.
  repeated BoostSpec spec = 2 [(google.api.field_behavior) = OPTIONAL];
}

// Filter specifications for data stores.
message FilterSpecs {
  // Optional. Data Stores where the filter configuration is applied. The full
  // names of the referenced data stores. Formats:
  // `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}`
  // `projects/{project}/locations/{location}/dataStores/{data_store}`
  repeated string data_stores = 1 [
    (google.api.field_behavior) = OPTIONAL,
    (google.api.resource_reference) = {
      type: "discoveryengine.googleapis.com/DataStore"
    }
  ];

  // Optional. The filter expression to be applied.
  // Expression syntax is documented at
  // https://cloud.google.com/generative-ai-app-builder/docs/filter-search-metadata#filter-expression-syntax
  string filter = 2 [(google.api.field_behavior) = OPTIONAL];
}
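
// Example (an illustrative sketch): a FilterSpecs entry that restricts the
// search to one data store and applies a metadata filter, in the proto3 JSON
// mapping:
//
// ```json
// {
//   "dataStores": [
//     "projects/my-project/locations/global/dataStores/my-store"
//   ],
//   "filter": "category: ANY(\"support\")"
// }
// ```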

// Represents the query input. It can contain one of:
//
// 1. A conversational query in the form of text.
//
// 2. An intent query that specifies which intent to trigger.
//
// 3. Natural language speech audio to be processed.
//
// 4. An event to be triggered.
//
// 5. DTMF digits to invoke an intent and fill in parameter value.
//
// 6. The results of a tool executed by the client.
message QueryInput {
  // Required. The input specification.
  oneof input {
    // The natural language text to be processed.
    TextInput text = 2;

    // The intent to be triggered.
    IntentInput intent = 3;

    // The natural language speech audio to be processed.
    AudioInput audio = 5;

    // The event to be triggered.
    EventInput event = 6;

    // The DTMF event to be handled.
    DtmfInput dtmf = 7;

    // The results of a tool executed by the client.
    google.cloud.dialogflow.cx.v3beta1.ToolCallResult tool_call_result = 11;
  }

  // Required. The language of the input. See [Language
  // Support](https://cloud.google.com/dialogflow/cx/docs/reference/language)
  // for a list of the currently supported language codes. Note that queries in
  // the same session do not necessarily need to specify the same language.
  string language_code = 4 [(google.api.field_behavior) = REQUIRED];
}

// Represents the information of a query if handled by generative agent
// resources.
message GenerativeInfo {
  // The stack of [playbooks][google.cloud.dialogflow.cx.v3beta1.Playbook] that
  // the conversation has currently entered, with the most recent one on the
  // top.
  repeated string current_playbooks = 1;

  // The actions performed by the generative playbook for the current agent
  // response.
  Example action_tracing_info = 2;
}

// Represents the result of a conversational query.
message QueryResult {
  // The original conversational query.
  oneof query {
    // If [natural language text][google.cloud.dialogflow.cx.v3beta1.TextInput]
    // was provided as input, this field will contain a copy of the text.
    string text = 1;

    // If an [intent][google.cloud.dialogflow.cx.v3beta1.IntentInput] was
    // provided as input, this field will contain a copy of the intent
    // identifier. Format:
    // `projects/<ProjectID>/locations/<LocationID>/agents/<AgentID>/intents/<IntentID>`.
    string trigger_intent = 11 [(google.api.resource_reference) = {
      type: "dialogflow.googleapis.com/Intent"
    }];

    // If [natural language speech
    // audio][google.cloud.dialogflow.cx.v3beta1.AudioInput] was provided as
    // input, this field will contain the transcript for the audio.
    string transcript = 12;

    // If an [event][google.cloud.dialogflow.cx.v3beta1.EventInput] was provided
    // as input, this field will contain the name of the event.
    string trigger_event = 14;

    // If a [DTMF][google.cloud.dialogflow.cx.v3beta1.DtmfInput] was provided as
    // input, this field will contain a copy of the
    // [DtmfInput][google.cloud.dialogflow.cx.v3beta1.DtmfInput].
    DtmfInput dtmf = 23;
  }

  // The language that was triggered during intent detection.
  // See [Language
  // Support](https://cloud.google.com/dialogflow/cx/docs/reference/language)
  // for a list of the currently supported language codes.
  string language_code = 2;

  // The collected [session
  // parameters][google.cloud.dialogflow.cx.v3beta1.SessionInfo.parameters].
  //
  // Depending on your protocol or client library language, this is a
  // map, associative array, symbol table, dictionary, or JSON object
  // composed of a collection of (MapKey, MapValue) pairs:
  //
  // * MapKey type: string
  // * MapKey value: parameter name
  // * MapValue type: If parameter's entity type is a composite entity then use
  // map, otherwise, depending on the parameter value type, it could be one of
  // string, number, boolean, null, list or map.
  // * MapValue value: If parameter's entity type is a composite entity then use
  // map from composite entity property names to property values, otherwise,
  // use parameter value.
  google.protobuf.Struct parameters = 3;

  // The list of rich messages returned to the client. Responses vary from
  // simple text messages to more sophisticated, structured payloads used
  // to drive complex logic.
  repeated ResponseMessage response_messages = 4;

  // The list of webhook IDs in the order of call sequence.
  repeated string webhook_ids = 25;

  // The list of webhook display names in the order of call sequence.
  repeated string webhook_display_names = 26;

  // The list of webhook latencies in the order of call sequence.
  repeated google.protobuf.Duration webhook_latencies = 27;

  // The list of webhook tags in the order of call sequence.
  repeated string webhook_tags = 29;

  // The list of webhook call status in the order of call sequence.
  repeated google.rpc.Status webhook_statuses = 13;

  // The list of webhook payloads in
  // [WebhookResponse.payload][google.cloud.dialogflow.cx.v3beta1.WebhookResponse.payload],
  // in the order of call sequence. If some webhook call fails or doesn't return
  // any payload, an empty `Struct` is used instead.
  repeated google.protobuf.Struct webhook_payloads = 6;

  // The current [Page][google.cloud.dialogflow.cx.v3beta1.Page]. Some, not all
  // fields are filled in this message, including but not limited to `name` and
  // `display_name`.
  Page current_page = 7;

  // The current [Flow][google.cloud.dialogflow.cx.v3beta1.Flow]. Some, not all
  // fields are filled in this message, including but not limited to `name` and
  // `display_name`.
  Flow current_flow = 31;

  // The [Intent][google.cloud.dialogflow.cx.v3beta1.Intent] that matched the
  // conversational query. Some, not all fields are filled in this message,
  // including but not limited to: `name` and `display_name`. This field is
  // deprecated, please use
  // [QueryResult.match][google.cloud.dialogflow.cx.v3beta1.QueryResult.match]
  // instead.
  Intent intent = 8 [deprecated = true];

  // The intent detection confidence. Values range from 0.0 (completely
  // uncertain) to 1.0 (completely certain).
  // This value is for informational purposes only and is only used to
  // help match the best intent within the classification threshold.
  // This value may change for the same end-user expression at any time due to a
  // model retraining or change in implementation.
  // This field is deprecated, please use
  // [QueryResult.match][google.cloud.dialogflow.cx.v3beta1.QueryResult.match]
  // instead.
  float intent_detection_confidence = 9 [deprecated = true];

  // Intent match result, could be an intent or an event.
  Match match = 15;

  // The free-form diagnostic info. For example, this field could contain
  // webhook call latency. The fields of this data can change without notice,
  // so you should not write code that depends on its structure.
  //
  // One of the fields is called "Alternative Matched Intents", which may
  // aid with debugging. The following describes these intent results:
  //
  // - The list is empty if no intent was matched to end-user input.
  // - Only intents that are referenced in the currently active flow are
  //   included.
  // - The matched intent is included.
  // - Other intents that could have matched end-user input, but did not match
  //   because they are referenced by intent routes that are out of
  //   [scope](https://cloud.google.com/dialogflow/cx/docs/concept/handler#scope),
  //   are included.
  // - Other intents referenced by intent routes in scope that matched end-user
  //   input, but had a lower confidence score.
  google.protobuf.Struct diagnostic_info = 10;

  // The information of a query if handled by generative agent resources.
  GenerativeInfo generative_info = 33;

  // The sentiment analysis result, which depends on
  // [`analyze_query_text_sentiment`]
  // [google.cloud.dialogflow.cx.v3beta1.QueryParameters.analyze_query_text_sentiment],
  // specified in the request.
  SentimentAnalysisResult sentiment_analysis_result = 17;

  // Returns the current advanced settings including IVR settings. Even though
  // the operations configured by these settings are performed by Dialogflow,
  // the client may need to perform special logic at the moment. For example, if
  // Dialogflow exports audio to Google Cloud Storage, then the client may need
  // to wait for the resulting object to appear in the bucket before proceeding.
  AdvancedSettings advanced_settings = 21;

  // Indicates whether the Thumbs up/Thumbs down rating controls need to be
  // shown for the response in the Dialogflow Messenger widget.
  bool allow_answer_feedback = 32;

  // Optional. Data store connection feature output signals.
  // Filled only when data stores are involved in serving the query and
  // DetectIntentRequest.populate_data_store_connection_signals is set to true
  // in the request.
  DataStoreConnectionSignals data_store_connection_signals = 35
      [(google.api.field_behavior) = OPTIONAL];
}

// Represents the natural language text to be processed.
message TextInput {
  // Required. The UTF-8 encoded natural language text to be processed.
  string text = 1 [(google.api.field_behavior) = REQUIRED];
}

// Represents the intent to trigger programmatically rather than as a result of
// natural language processing.
message IntentInput {
  // Required. The unique identifier of the intent.
  // Format:
  // `projects/<ProjectID>/locations/<LocationID>/agents/<AgentID>/intents/<IntentID>`.
  string intent = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      type: "dialogflow.googleapis.com/Intent"
    }
  ];
}

// Represents the natural speech audio to be processed.
message AudioInput {
  // Required. Instructs the speech recognizer how to process the speech audio.
  InputAudioConfig config = 1 [(google.api.field_behavior) = REQUIRED];

  // The natural language speech audio to be processed.
  // A single request can contain up to 2 minutes of speech audio data.
  // The [transcribed
  // text][google.cloud.dialogflow.cx.v3beta1.QueryResult.transcript] cannot
  // contain more than 256 bytes.
  //
  // For non-streaming audio detect intent, both `config` and `audio` must be
  // provided.
  // For streaming audio detect intent, `config` must be provided in
  // the first request and `audio` must be provided in all following requests.
  bytes audio = 2;
}
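
// Example (an illustrative sketch): a non-streaming AudioInput in the proto3
// JSON mapping; `audio` carries base64-encoded bytes:
//
// ```json
// {
//   "config": {
//     "audioEncoding": "AUDIO_ENCODING_LINEAR_16",
//     "sampleRateHertz": 16000
//   },
//   "audio": "<base64-encoded audio>"
// }
// ```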

// Represents the event to trigger.
message EventInput {
  // Name of the event.
  string event = 1;
}

// Represents the input for a DTMF event.
message DtmfInput {
  // The DTMF digits.
  string digits = 1;

  // The finish digit (if any).
  string finish_digit = 2;
}
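
// Example (an illustrative sketch): a caller keys in "123" and then presses
// "#" to finish:
//
// ```json
// { "digits": "123", "finishDigit": "#" }
// ```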

// Represents one match result of [MatchIntent][].
message Match {
  // Type of a Match.
  enum MatchType {
    // Not specified. Should never be used.
    MATCH_TYPE_UNSPECIFIED = 0;

    // The query was matched to an intent.
    INTENT = 1;

    // The query directly triggered an intent.
    DIRECT_INTENT = 2;

    // The query was used for parameter filling.
    PARAMETER_FILLING = 3;

    // No match was found for the query.
    NO_MATCH = 4;

    // Indicates an empty query.
    NO_INPUT = 5;

    // The query directly triggered an event.
    EVENT = 6;

    // The query was matched to a Knowledge Connector answer.
    KNOWLEDGE_CONNECTOR = 8;

    // The query was handled by a
    // [`Playbook`][google.cloud.dialogflow.cx.v3beta1.Playbook].
    PLAYBOOK = 9;
  }

  // The [Intent][google.cloud.dialogflow.cx.v3beta1.Intent] that matched the
  // query. Some, not all fields are filled in this message, including but not
  // limited to: `name` and `display_name`. Only filled for
  // [`INTENT`][google.cloud.dialogflow.cx.v3beta1.Match.MatchType] match type.
  Intent intent = 1;

  // The event that matched the query. Filled for
  // [`EVENT`][google.cloud.dialogflow.cx.v3beta1.Match.MatchType],
  // [`NO_MATCH`][google.cloud.dialogflow.cx.v3beta1.Match.MatchType] and
  // [`NO_INPUT`][google.cloud.dialogflow.cx.v3beta1.Match.MatchType] match
  // types.
  string event = 6;

  // The collection of parameters extracted from the query.
  //
  // Depending on your protocol or client library language, this is a
  // map, associative array, symbol table, dictionary, or JSON object
  // composed of a collection of (MapKey, MapValue) pairs:
  //
  // * MapKey type: string
  // * MapKey value: parameter name
  // * MapValue type: If parameter's entity type is a composite entity then use
  // map, otherwise, depending on the parameter value type, it could be one of
  // string, number, boolean, null, list or map.
  // * MapValue value: If parameter's entity type is a composite entity then use
  // map from composite entity property names to property values, otherwise,
  // use parameter value.
  google.protobuf.Struct parameters = 2;

  // Final text input which was matched during MatchIntent. This value can be
  // different from the original input sent in the request because of spelling
  // correction or other processing.
  string resolved_input = 3;

  // Type of this [Match][google.cloud.dialogflow.cx.v3beta1.Match].
  MatchType match_type = 4;

  // The confidence of this match. Values range from 0.0 (completely uncertain)
  // to 1.0 (completely certain).
  // This value is for informational purposes only and is only used to help match
  // the best intent within the classification threshold. This value may change
  // for the same end-user expression at any time due to a model retraining or
  // change in implementation.
  float confidence = 5;
}

// Request of [MatchIntent][].
message MatchIntentRequest {
  // Required. The name of the session this query is sent to.
  // Format:
  // `projects/<ProjectID>/locations/<LocationID>/agents/<AgentID>/sessions/<SessionID>`
  // or
  // `projects/<ProjectID>/locations/<LocationID>/agents/<AgentID>/environments/<EnvironmentID>/sessions/<SessionID>`.
  // If `Environment ID` is not specified, we assume the default 'draft'
  // environment.
  // It's up to the API caller to choose an appropriate `Session ID`. It can be
  // a random number or some type of session identifier (preferably hashed).
  // The length of the `Session ID` must not exceed 36 characters.
  //
  //  For more information, see the [sessions
  //  guide](https://cloud.google.com/dialogflow/cx/docs/concept/session).
  string session = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      type: "dialogflow.googleapis.com/Session"
    }
  ];

  // The parameters of this query.
  QueryParameters query_params = 2;

  // Required. The input specification.
  QueryInput query_input = 3 [(google.api.field_behavior) = REQUIRED];

  // Persist session parameter changes from `query_params`.
  bool persist_parameter_changes = 5;
}

// Response of [MatchIntent][].
message MatchIntentResponse {
  // The original conversational query.
  oneof query {
    // If [natural language text][google.cloud.dialogflow.cx.v3beta1.TextInput]
    // was provided as input, this field will contain a copy of the text.
    string text = 1;

    // If an [intent][google.cloud.dialogflow.cx.v3beta1.IntentInput] was
    // provided as input, this field will contain a copy of the intent
    // identifier. Format:
    // `projects/<ProjectID>/locations/<LocationID>/agents/<AgentID>/intents/<IntentID>`.
    string trigger_intent = 2 [(google.api.resource_reference) = {
      type: "dialogflow.googleapis.com/Intent"
    }];

    // If [natural language speech
    // audio][google.cloud.dialogflow.cx.v3beta1.AudioInput] was provided as
    // input, this field will contain the transcript for the audio.
    string transcript = 3;

    // If an [event][google.cloud.dialogflow.cx.v3beta1.EventInput] was provided
    // as input, this field will contain a copy of the event name.
    string trigger_event = 6;
  }

  // Match results, if more than one, ordered in descending order by the
  // confidence that the particular intent matches the query.
  repeated Match matches = 4;

  // The current [Page][google.cloud.dialogflow.cx.v3beta1.Page]. Some, not all
  // fields are filled in this message, including but not limited to `name` and
  // `display_name`.
  Page current_page = 5;
}

// Request of [FulfillIntent][]
message FulfillIntentRequest {
  // Must be the same as the corresponding MatchIntent request; otherwise the
  // behavior is undefined.
  MatchIntentRequest match_intent_request = 1;

  // The matched intent/event to fulfill.
  Match match = 2;

  // Instructs the speech synthesizer how to generate output audio.
  OutputAudioConfig output_audio_config = 3;
}
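
// Example (an illustrative sketch): the two-step MatchIntent / FulfillIntent
// flow in the proto3 JSON mapping. First call MatchIntent, then send the same
// request back together with the chosen match:
//
// ```json
// {
//   "matchIntentRequest": {
//     "session": "projects/my-project/locations/global/agents/my-agent/sessions/session-1",
//     "queryInput": {
//       "text": { "text": "I want to book a flight" },
//       "languageCode": "en"
//     }
//   },
//   "match": {
//     "intent": { "name": "projects/my-project/locations/global/agents/my-agent/intents/flight-intent" },
//     "matchType": "INTENT",
//     "confidence": 0.9
//   }
// }
// ```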

// Response of [FulfillIntent][]
message FulfillIntentResponse {
  // Output only. The unique identifier of the response. It can be used to
  // locate a response in the training example set or for reporting issues.
  string response_id = 1;

  // The result of the conversational query.
  QueryResult query_result = 2;

  // The audio data bytes encoded as specified in the request.
  // Note: The output audio is generated based on the values of default platform
  // text responses found in the
  // [`query_result.response_messages`][google.cloud.dialogflow.cx.v3beta1.QueryResult.response_messages]
  // field. If multiple default text responses exist, they will be concatenated
  // when generating audio. If no default platform text responses exist, the
  // generated audio content will be empty.
  //
  // In some scenarios, multiple output audio fields may be present in the
  // response structure. In these cases, only the top-most-level audio output
  // has content.
  bytes output_audio = 3;

  // The config used by the speech synthesizer to generate the output audio.
  OutputAudioConfig output_audio_config = 4;
}

// The result of sentiment analysis. Sentiment analysis inspects user input
// and identifies the prevailing subjective opinion, especially to determine a
// user's attitude as positive, negative, or neutral.
message SentimentAnalysisResult {
  // Sentiment score between -1.0 (negative sentiment) and 1.0 (positive
  // sentiment).
  float score = 1;

  // A non-negative number in the [0, +inf) range, which represents the absolute
  // magnitude of sentiment, regardless of score (positive or negative).
  float magnitude = 2;
}
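
// For example (illustrative): a clearly positive utterance might yield
// `{ "score": 0.8, "magnitude": 0.8 }`, while mixed input can combine a
// near-zero score with a high magnitude, e.g. `{ "score": 0.1, "magnitude": 1.6 }`.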
