// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package google.cloud.dialogflow.cx.v3;
import "google/api/annotations.proto";
import "google/api/field_behavior.proto";
import "google/api/resource.proto";
import "google/cloud/dialogflow/cx/v3/advanced_settings.proto";
import "google/cloud/dialogflow/cx/v3/audio_config.proto";
import "google/cloud/dialogflow/cx/v3/flow.proto";
import "google/cloud/dialogflow/cx/v3/intent.proto";
import "google/cloud/dialogflow/cx/v3/page.proto";
import "google/cloud/dialogflow/cx/v3/response_message.proto";
import "google/cloud/dialogflow/cx/v3/session_entity_type.proto";
import "google/protobuf/duration.proto";
import "google/protobuf/struct.proto";
import "google/rpc/status.proto";
import "google/type/latlng.proto";
import "google/api/client.proto";
option cc_enable_arenas = true;
option csharp_namespace = "Google.Cloud.Dialogflow.Cx.V3";
option go_package = "google.golang.org/genproto/googleapis/cloud/dialogflow/cx/v3;cx";
option java_multiple_files = true;
option java_outer_classname = "SessionProto";
option java_package = "com.google.cloud.dialogflow.cx.v3";
option objc_class_prefix = "DF";
option ruby_package = "Google::Cloud::Dialogflow::CX::V3";
option (google.api.resource_definition) = {
type: "dialogflow.googleapis.com/Session"
pattern: "projects/{project}/locations/{location}/agents/{agent}/sessions/{session}"
pattern: "projects/{project}/locations/{location}/agents/{agent}/environments/{environment}/sessions/{session}"
};
// A session represents an interaction with a user. You retrieve user input
// and pass it to the [DetectIntent][google.cloud.dialogflow.cx.v3.Sessions.DetectIntent] method to determine
// user intent and respond.
service Sessions {
option (google.api.default_host) = "dialogflow.googleapis.com";
option (google.api.oauth_scopes) =
"https://www.googleapis.com/auth/cloud-platform,"
"https://www.googleapis.com/auth/dialogflow";
// Processes a natural language query and returns structured, actionable data
// as a result. This method is not idempotent, because it may cause session
// entity types to be updated, which in turn might affect results of future
// queries.
//
// Note: Always use agent versions for production traffic.
// See [Versions and
// environments](https://cloud.google.com/dialogflow/cx/docs/concept/version).
rpc DetectIntent(DetectIntentRequest) returns (DetectIntentResponse) {
option (google.api.http) = {
post: "/v3/{session=projects/*/locations/*/agents/*/sessions/*}:detectIntent"
body: "*"
additional_bindings {
post: "/v3/{session=projects/*/locations/*/agents/*/environments/*/sessions/*}:detectIntent"
body: "*"
}
};
}
// Processes a natural language query in audio format in a streaming fashion
// and returns structured, actionable data as a result. This method is only
// available via the gRPC API (not REST).
//
// Note: Always use agent versions for production traffic.
// See [Versions and
// environments](https://cloud.google.com/dialogflow/cx/docs/concept/version).
rpc StreamingDetectIntent(stream StreamingDetectIntentRequest) returns (stream StreamingDetectIntentResponse) {
}
// Returns preliminary intent match results without changing the session
// status.
rpc MatchIntent(MatchIntentRequest) returns (MatchIntentResponse) {
option (google.api.http) = {
post: "/v3/{session=projects/*/locations/*/agents/*/sessions/*}:matchIntent"
body: "*"
additional_bindings {
post: "/v3/{session=projects/*/locations/*/agents/*/environments/*/sessions/*}:matchIntent"
body: "*"
}
};
}
// Fulfills a matched intent returned by [MatchIntent][google.cloud.dialogflow.cx.v3.Sessions.MatchIntent].
// Must be called after [MatchIntent][google.cloud.dialogflow.cx.v3.Sessions.MatchIntent], with input from
// [MatchIntentResponse][google.cloud.dialogflow.cx.v3.MatchIntentResponse]. Otherwise, the behavior is undefined.
rpc FulfillIntent(FulfillIntentRequest) returns (FulfillIntentResponse) {
option (google.api.http) = {
post: "/v3/{match_intent_request.session=projects/*/locations/*/agents/*/sessions/*}:fulfillIntent"
body: "*"
additional_bindings {
post: "/v3/{match_intent_request.session=projects/*/locations/*/agents/*/environments/*/sessions/*}:fulfillIntent"
body: "*"
}
};
}
}
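// Example: a minimal Java sketch of calling DetectIntent. `SessionsClient`
// comes from the companion client library (google-cloud-dialogflow-cx), not
// from this PROTO artifact, the session path placeholders are illustrative,
// and error handling is elided (`create()` throws IOException).
// ```
// import com.google.cloud.dialogflow.cx.v3.DetectIntentRequest;
// import com.google.cloud.dialogflow.cx.v3.DetectIntentResponse;
// import com.google.cloud.dialogflow.cx.v3.QueryInput;
// import com.google.cloud.dialogflow.cx.v3.SessionsClient;
// import com.google.cloud.dialogflow.cx.v3.TextInput;
//
// try (SessionsClient sessions = SessionsClient.create()) {
//   DetectIntentRequest request =
//       DetectIntentRequest.newBuilder()
//           .setSession("projects/<Project ID>/locations/<Location ID>"
//               + "/agents/<Agent ID>/sessions/<Session ID>")
//           .setQueryInput(
//               QueryInput.newBuilder()
//                   .setText(TextInput.newBuilder().setText("book a flight"))
//                   .setLanguageCode("en"))
//           .build();
//   DetectIntentResponse response = sessions.detectIntent(request);
//   // The replies for the end user live on the query result.
//   response.getQueryResult().getResponseMessagesList()
//       .forEach(System.out::println);
// }
// ```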
// The request to detect a user's intent.
message DetectIntentRequest {
// Required. The name of the session this query is sent to.
// Format: `projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/sessions/<Session ID>` or
// `projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/environments/<Environment ID>/sessions/<Session ID>`.
// If `Environment ID` is not specified, we assume default 'draft'
// environment.
// It's up to the API caller to choose an appropriate `Session ID`. It can be
// a random number or some type of session identifier (preferably hashed).
// The length of the `Session ID` must not exceed 36 characters.
//
// For more information, see the [sessions
// guide](https://cloud.google.com/dialogflow/cx/docs/concept/session).
//
// Note: Always use agent versions for production traffic.
// See [Versions and
// environments](https://cloud.google.com/dialogflow/cx/docs/concept/version).
string session = 1 [
(google.api.field_behavior) = REQUIRED,
(google.api.resource_reference) = {
type: "dialogflow.googleapis.com/Session"
}
];
// The parameters of this query.
QueryParameters query_params = 2;
// Required. The input specification.
QueryInput query_input = 3 [(google.api.field_behavior) = REQUIRED];
// Instructs the speech synthesizer how to generate the output audio.
OutputAudioConfig output_audio_config = 4;
}
// The message returned from the DetectIntent method.
message DetectIntentResponse {
// Represents different DetectIntentResponse types.
enum ResponseType {
// Not specified. This should never happen.
RESPONSE_TYPE_UNSPECIFIED = 0;
// Partial response. For example, aggregated responses in a Fulfillment that
// enables `return_partial_response` can be returned as a partial response.
// WARNING: partial response is not eligible for barge-in.
PARTIAL = 1;
// Final response.
FINAL = 2;
}
// Output only. The unique identifier of the response. It can be used to
// locate a response in the training example set or for reporting issues.
string response_id = 1;
// The result of the conversational query.
QueryResult query_result = 2;
// The audio data bytes encoded as specified in the request.
// Note: The output audio is generated based on the values of default platform
// text responses found in the
// [`query_result.response_messages`][google.cloud.dialogflow.cx.v3.QueryResult.response_messages] field. If
// multiple default text responses exist, they will be concatenated when
// generating audio. If no default platform text responses exist, the
// generated audio content will be empty.
//
// In some scenarios, multiple output audio fields may be present in the
// response structure. In these cases, only the top-most-level audio output
// has content.
bytes output_audio = 4;
// The config used by the speech synthesizer to generate the output audio.
OutputAudioConfig output_audio_config = 5;
// Response type.
ResponseType response_type = 6;
// Indicates whether the partial response can be cancelled when a later
// response arrives. For example, if the agent specified some music as a
// partial response, it can be cancelled.
bool allow_cancellation = 7;
}
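// Example: a Java sketch of consuming this response with the generated
// accessors from this artifact; `response` is assumed to be the
// `DetectIntentResponse` from the sketch above.
// ```
// if (response.getResponseType() == DetectIntentResponse.ResponseType.FINAL) {
//   QueryResult result = response.getQueryResult();
//   System.out.println("Resolved input: " + result.getMatch().getResolvedInput());
//   // Non-empty only if the request set output_audio_config and the agent
//   // produced default platform text responses.
//   byte[] audio = response.getOutputAudio().toByteArray();
// }
// ```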
// The top-level message sent by the client to the
// [Sessions.StreamingDetectIntent][google.cloud.dialogflow.cx.v3.Sessions.StreamingDetectIntent] method.
//
// Multiple request messages should be sent in order:
//
// 1. The first message must contain
// [session][google.cloud.dialogflow.cx.v3.StreamingDetectIntentRequest.session],
// [query_input][google.cloud.dialogflow.cx.v3.StreamingDetectIntentRequest.query_input] plus optionally
// [query_params][google.cloud.dialogflow.cx.v3.StreamingDetectIntentRequest.query_params]. If the client
// wants to receive an audio response, it should also contain
// [output_audio_config][google.cloud.dialogflow.cx.v3.StreamingDetectIntentRequest.output_audio_config].
//
// 2. If [query_input][google.cloud.dialogflow.cx.v3.StreamingDetectIntentRequest.query_input] was set to
// [query_input.audio.config][google.cloud.dialogflow.cx.v3.AudioInput.config], all subsequent messages
// must contain [query_input.audio.audio][google.cloud.dialogflow.cx.v3.AudioInput.audio] to continue with
// Speech recognition.
// If you decide to detect an intent from text
// input instead after you have already started Speech recognition, send a
// message with [query_input.text][google.cloud.dialogflow.cx.v3.QueryInput.text].
//
// However, note that:
//
// * Dialogflow will bill you for the audio duration so far.
// * Dialogflow discards all Speech recognition results in favor of the
// input text.
// * Dialogflow will use the language code from the first message.
//
// After you have sent all input, you must half-close or abort the request stream.
message StreamingDetectIntentRequest {
// The name of the session this query is sent to.
// Format: `projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/sessions/<Session ID>` or
// `projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/environments/<Environment ID>/sessions/<Session ID>`.
// If `Environment ID` is not specified, we assume default 'draft'
// environment.
// It's up to the API caller to choose an appropriate `Session ID`. It can be
// a random number or some type of session identifier (preferably hashed).
// The length of the `Session ID` must not exceed 36 characters.
// Note: session must be set in the first request.
//
// For more information, see the [sessions
// guide](https://cloud.google.com/dialogflow/cx/docs/concept/session).
//
// Note: Always use agent versions for production traffic.
// See [Versions and
// environments](https://cloud.google.com/dialogflow/cx/docs/concept/version).
string session = 1 [(google.api.resource_reference) = {
type: "dialogflow.googleapis.com/Session"
}];
// The parameters of this query.
QueryParameters query_params = 2;
// Required. The input specification.
QueryInput query_input = 3 [(google.api.field_behavior) = REQUIRED];
// Instructs the speech synthesizer how to generate the output audio.
OutputAudioConfig output_audio_config = 4;
// Enable partial detect intent response. If this flag is not enabled, the
// response stream still contains only one final `DetectIntentResponse` even
// if some `Fulfillment`s in the agent have been configured to return partial
// responses.
bool enable_partial_response = 5;
}
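// Example: a Java sketch of the message ordering described above, using the
// companion client library's bidi streaming callable (`BidiStream` is from
// com.google.api.gax.rpc); `sessions`, `inputAudioConfig`, and `audioChunks`
// are assumed to exist.
// ```
// BidiStream<StreamingDetectIntentRequest, StreamingDetectIntentResponse>
//     stream = sessions.streamingDetectIntentCallable().call();
// // 1. The first message carries the session, the audio config, and
// //    (optionally) query_params / output_audio_config.
// stream.send(
//     StreamingDetectIntentRequest.newBuilder()
//         .setSession("projects/<Project ID>/locations/<Location ID>"
//             + "/agents/<Agent ID>/sessions/<Session ID>")
//         .setQueryInput(
//             QueryInput.newBuilder()
//                 .setAudio(AudioInput.newBuilder().setConfig(inputAudioConfig))
//                 .setLanguageCode("en"))
//         .setEnablePartialResponse(true)
//         .build());
// // 2. All subsequent messages carry only audio bytes.
// for (ByteString chunk : audioChunks) {
//   stream.send(
//       StreamingDetectIntentRequest.newBuilder()
//           .setQueryInput(QueryInput.newBuilder()
//               .setAudio(AudioInput.newBuilder().setAudio(chunk)))
//           .build());
// }
// // 3. Half-close once all input has been sent.
// stream.closeSend();
// ```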
// The top-level message returned from the `StreamingDetectIntent` method.
//
// Multiple response messages can be returned in order:
//
// 1. If the input was set to streaming audio, the first one or more messages
// contain `recognition_result`. Each `recognition_result` represents a more
// complete transcript of what the user said. The last `recognition_result`
// has `is_final` set to `true`.
//
// 2. If `enable_partial_response` is true, the following N messages
// (currently 1 <= N <= 4) contain `detect_intent_response`. The first (N-1)
// `detect_intent_response`s will have `response_type` set to `PARTIAL`.
// The last `detect_intent_response` has `response_type` set to `FINAL`.
// If `enable_partial_response` is false, the response stream contains only
// the final `detect_intent_response`.
message StreamingDetectIntentResponse {
// The output response.
oneof response {
// The result of speech recognition.
StreamingRecognitionResult recognition_result = 1;
// The response from detect intent.
DetectIntentResponse detect_intent_response = 2;
}
}
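// Example: a Java sketch of draining the stream opened in the sketch above;
// the `response` oneof maps to the generated `getResponseCase()` accessor,
// and `handle(...)` is a hypothetical callback.
// ```
// for (StreamingDetectIntentResponse resp : stream) {
//   switch (resp.getResponseCase()) {
//     case RECOGNITION_RESULT:
//       // Interim transcripts; the last one has is_final = true.
//       StreamingRecognitionResult r = resp.getRecognitionResult();
//       System.out.printf("%s (final=%b)%n", r.getTranscript(), r.getIsFinal());
//       break;
//     case DETECT_INTENT_RESPONSE:
//       // With enable_partial_response, PARTIAL responses precede FINAL.
//       handle(resp.getDetectIntentResponse());
//       break;
//     default:
//       break;
//   }
// }
// ```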
// Contains a speech recognition result corresponding to a portion of the audio
// that is currently being processed or an indication that this is the end
// of the single requested utterance.
//
// Example:
//
// 1. transcript: "tube"
//
// 2. transcript: "to be a"
//
// 3. transcript: "to be"
//
// 4. transcript: "to be or not to be"
// is_final: true
//
// 5. transcript: " that's"
//
// 6. transcript: " that is"
//
// 7. message_type: `END_OF_SINGLE_UTTERANCE`
//
// 8. transcript: " that is the question"
// is_final: true
//
// Only two of the responses contain final results (#4 and #8 indicated by
// `is_final: true`). Concatenating these generates the full transcript: "to be
// or not to be that is the question".
//
// In each response we populate:
//
// * for `TRANSCRIPT`: `transcript` and possibly `is_final`.
//
// * for `END_OF_SINGLE_UTTERANCE`: only `message_type`.
message StreamingRecognitionResult {
// Type of the response message.
enum MessageType {
// Not specified. Should never be used.
MESSAGE_TYPE_UNSPECIFIED = 0;
// Message contains a (possibly partial) transcript.
TRANSCRIPT = 1;
// Event indicates that the server has detected the end of the user's speech
// utterance and expects no additional speech. Therefore, the server will
// not process additional audio (although it may subsequently return
// additional results). The client should stop sending additional audio
// data, half-close the gRPC connection, and wait for any additional results
// until the server closes the gRPC connection. This message is only sent if
// [`single_utterance`][google.cloud.dialogflow.cx.v3.InputAudioConfig.single_utterance] was set to
// `true`, and is not used otherwise.
END_OF_SINGLE_UTTERANCE = 2;
}
// Type of the result message.
MessageType message_type = 1;
// Transcript text representing the words that the user spoke.
// Populated if and only if `message_type` = `TRANSCRIPT`.
string transcript = 2;
// If `false`, the `StreamingRecognitionResult` represents an
// interim result that may change. If `true`, the recognizer will not return
// any further hypotheses about this piece of the audio. May only be populated
// for `message_type` = `TRANSCRIPT`.
bool is_final = 3;
// The Speech confidence between 0.0 and 1.0 for the current portion of audio.
// A higher number indicates an estimated greater likelihood that the
// recognized words are correct. The default of 0.0 is a sentinel value
// indicating that confidence was not set.
//
// This field is typically only provided if `is_final` is true and you should
// not rely on it being accurate or even set.
float confidence = 4;
// An estimate of the likelihood that the speech recognizer will
// not change its guess about this interim recognition result:
// * If the value is unspecified or 0.0, Dialogflow didn't compute the
// stability. In particular, Dialogflow will only provide stability for
// `TRANSCRIPT` results with `is_final = false`.
// * Otherwise, the value is in (0.0, 1.0] where 0.0 means completely
// unstable and 1.0 means completely stable.
float stability = 6;
// Word-specific information for the words recognized by Speech in
// [transcript][google.cloud.dialogflow.cx.v3.StreamingRecognitionResult.transcript]. Populated if and only if `message_type` = `TRANSCRIPT` and
// [InputAudioConfig.enable_word_info] is set.
repeated SpeechWordInfo speech_word_info = 7;
// Time offset of the end of this Speech recognition result relative to the
// beginning of the audio. Only populated for `message_type` =
// `TRANSCRIPT`.
google.protobuf.Duration speech_end_offset = 8;
// Detected language code for the transcript.
string language_code = 10;
}
// Represents the parameters of a conversational query.
message QueryParameters {
// The time zone of this conversational query from the [time zone
// database](https://www.iana.org/time-zones), e.g., America/New_York,
// Europe/Paris. If not provided, the time zone specified in the agent is
// used.
string time_zone = 1;
// The geo location of this conversational query.
google.type.LatLng geo_location = 2;
// Additional session entity types to replace or extend developer entity types
// with. The entity synonyms apply to all languages and persist for the
// session of this query.
repeated SessionEntityType session_entity_types = 3;
// This field can be used to pass custom data into the webhook associated with
// the agent. Arbitrary JSON objects are supported.
// Some integrations that query a Dialogflow agent may provide additional
// information in the payload.
// In particular, for the Dialogflow Phone Gateway integration, this field has
// the form:
// ```
// {
//   "telephony": {
//     "caller_id": "+18558363987"
//   }
// }
// ```
google.protobuf.Struct payload = 4;
// Additional parameters to be put into [session
// parameters][SessionInfo.parameters]. To remove a
// parameter from the session, clients should explicitly set the parameter
// value to null.
//
// You can reference the session parameters in the agent with the following
// format: $session.params.parameter-id.
//
// Depending on your protocol or client library language, this is a
// map, associative array, symbol table, dictionary, or JSON object
// composed of a collection of (MapKey, MapValue) pairs:
//
// - MapKey type: string
// - MapKey value: parameter name
// - MapValue type:
//   - If parameter's entity type is a composite entity: map
//   - Else: depending on parameter value type, could be one of string,
//     number, boolean, null, list or map
// - MapValue value:
//   - If parameter's entity type is a composite entity:
//     map from composite entity property names to property values
//   - Else: parameter value
google.protobuf.Struct parameters = 5;
// The unique identifier of the [page][google.cloud.dialogflow.cx.v3.Page] to override the [current
// page][QueryResult.current_page] in the session.
// Format: `projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/flows/<Flow ID>/pages/<Page ID>`.
//
// If `current_page` is specified, the previous state of the session will be
// ignored by Dialogflow, including the [previous
// page][QueryResult.current_page] and the [previous session
// parameters][QueryResult.parameters].
// In most cases, [current_page][google.cloud.dialogflow.cx.v3.QueryParameters.current_page] and
// [parameters][google.cloud.dialogflow.cx.v3.QueryParameters.parameters] should be configured together to
// direct a session to a specific state.
string current_page = 6 [(google.api.resource_reference) = {
type: "dialogflow.googleapis.com/Page"
}];
// Whether to disable webhook calls for this request.
bool disable_webhook = 7;
// Configures whether sentiment analysis should be performed. If not
// provided, sentiment analysis is not performed.
bool analyze_query_text_sentiment = 8;
// This field can be used to pass HTTP headers for a webhook
// call. These headers will be sent to webhook along with the headers that
// have been configured through Dialogflow web console. The headers defined
// within this field will overwrite the headers configured through Dialogflow
// console if there is a conflict. Header names are case-insensitive.
// Google's specified headers are not allowed, including "Host",
// "Content-Length", "Connection", "From", "User-Agent", "Accept-Encoding",
// "If-Modified-Since", "If-None-Match", "X-Forwarded-For", etc.
map<string, string> webhook_headers = 10;
// A list of flow versions to override for the request.
// Format: `projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/flows/<Flow ID>/versions/<Version ID>`.
//
// If version 1 of flow X is included in this list, the traffic of
// flow X will go through version 1 regardless of the version configuration in
// the environment. Each flow can have at most one version specified in this
// list.
repeated string flow_versions = 14 [(google.api.resource_reference) = {
type: "dialogflow.googleapis.com/Version"
}];
}
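// Example: a Java sketch of setting and removing session parameters through
// the `parameters` Struct, following the null-removal rule above; the
// parameter names are illustrative.
// ```
// import com.google.protobuf.NullValue;
// import com.google.protobuf.Struct;
// import com.google.protobuf.Value;
//
// QueryParameters params =
//     QueryParameters.newBuilder()
//         .setTimeZone("America/New_York")
//         .setParameters(
//             Struct.newBuilder()
//                 // Referenced in the agent as $session.params.order-id.
//                 .putFields("order-id",
//                     Value.newBuilder().setStringValue("12345").build())
//                 // An explicit null removes the parameter from the session.
//                 .putFields("stale-param",
//                     Value.newBuilder().setNullValue(NullValue.NULL_VALUE).build()))
//         // Custom webhook header; Google-specified headers are not allowed.
//         .putWebhookHeaders("X-Custom-Trace", "abc123")
//         .build();
// ```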
// Represents the query input. It can contain one of:
//
// 1. A conversational query in the form of text.
//
// 2. An intent query that specifies which intent to trigger.
//
// 3. Natural language speech audio to be processed.
//
// 4. An event to be triggered.
//
message QueryInput {
// Required. The input specification.
oneof input {
// The natural language text to be processed.
TextInput text = 2;
// The intent to be triggered.
IntentInput intent = 3;
// The natural language speech audio to be processed.
AudioInput audio = 5;
// The event to be triggered.
EventInput event = 6;
// The DTMF event to be handled.
DtmfInput dtmf = 7;
}
// Required. The language of the input. See [Language
// Support](https://cloud.google.com/dialogflow/cx/docs/reference/language)
// for a list of the currently supported language codes. Note that queries in
// the same session do not necessarily need to specify the same language.
string language_code = 4 [(google.api.field_behavior) = REQUIRED];
}
// Represents the result of a conversational query.
message QueryResult {
// The original conversational query.
oneof query {
// If [natural language text][google.cloud.dialogflow.cx.v3.TextInput] was provided as input, this field
// will contain a copy of the text.
string text = 1;
// If an [intent][google.cloud.dialogflow.cx.v3.IntentInput] was provided as input, this field will
// contain a copy of the intent identifier.
// Format: `projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/intents/<Intent ID>`.
string trigger_intent = 11 [(google.api.resource_reference) = {
type: "dialogflow.googleapis.com/Intent"
}];
// If [natural language speech audio][google.cloud.dialogflow.cx.v3.AudioInput] was provided as input,
// this field will contain the transcript for the audio.
string transcript = 12;
// If an [event][google.cloud.dialogflow.cx.v3.EventInput] was provided as input, this field will contain
// the name of the event.
string trigger_event = 14;
}
// The language that was triggered during intent detection.
// See [Language
// Support](https://cloud.google.com/dialogflow/cx/docs/reference/language)
// for a list of the currently supported language codes.
string language_code = 2;
// The collected [session parameters][google.cloud.dialogflow.cx.v3.SessionInfo.parameters].
//
// Depending on your protocol or client library language, this is a
// map, associative array, symbol table, dictionary, or JSON object
// composed of a collection of (MapKey, MapValue) pairs:
//
// - MapKey type: string
// - MapKey value: parameter name
// - MapValue type:
//   - If parameter's entity type is a composite entity: map
//   - Else: depending on parameter value type, could be one of string,
//     number, boolean, null, list or map
// - MapValue value:
//   - If parameter's entity type is a composite entity:
//     map from composite entity property names to property values
//   - Else: parameter value
google.protobuf.Struct parameters = 3;
// The list of rich messages returned to the client. Responses vary from
// simple text messages to more sophisticated, structured payloads used
// to drive complex logic.
repeated ResponseMessage response_messages = 4;
// The list of webhook call statuses in the order of call sequence.
repeated google.rpc.Status webhook_statuses = 13;
// The list of webhook payloads in [WebhookResponse.payload][google.cloud.dialogflow.cx.v3.WebhookResponse.payload], in
// the order of call sequence. If a webhook call fails or doesn't return
// any payload, an empty `Struct` is used instead.
repeated google.protobuf.Struct webhook_payloads = 6;
// The current [Page][google.cloud.dialogflow.cx.v3.Page]. Only some fields are filled in this message,
// including but not limited to `name` and `display_name`.
Page current_page = 7;
// The [Intent][google.cloud.dialogflow.cx.v3.Intent] that matched the conversational query. Only some fields
// are filled in this message, including but not limited to: `name` and
// `display_name`.
// This field is deprecated, please use [QueryResult.match][google.cloud.dialogflow.cx.v3.QueryResult.match] instead.
Intent intent = 8 [deprecated = true];
// The intent detection confidence. Values range from 0.0 (completely
// uncertain) to 1.0 (completely certain).
// This value is for informational purpose only and is only used to
// help match the best intent within the classification threshold.
// This value may change for the same end-user expression at any time due to a
// model retraining or change in implementation.
// This field is deprecated, please use [QueryResult.match][google.cloud.dialogflow.cx.v3.QueryResult.match] instead.
float intent_detection_confidence = 9 [deprecated = true];
// Intent match result; it could be an intent or an event.
Match match = 15;
// The free-form diagnostic info. For example, this field could contain
// webhook call latency. The string keys of the Struct's fields map can change
// without notice.
google.protobuf.Struct diagnostic_info = 10;
// The sentiment analysis result, which depends on
// [`analyze_query_text_sentiment`]
// [google.cloud.dialogflow.cx.v3.QueryParameters.analyze_query_text_sentiment], specified in the request.
SentimentAnalysisResult sentiment_analysis_result = 17;
}
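// Example: a Java sketch of reading the commonly used fields of this result;
// `result` is assumed to be the `QueryResult` from a DetectIntent call.
// ```
// // Collect the plain-text replies out of the rich response messages.
// StringBuilder reply = new StringBuilder();
// for (ResponseMessage msg : result.getResponseMessagesList()) {
//   if (msg.hasText()) {
//     reply.append(String.join(" ", msg.getText().getTextList()));
//   }
// }
// // Prefer `match` over the deprecated intent fields.
// if (result.hasMatch()
//     && result.getMatch().getMatchType() == Match.MatchType.INTENT) {
//   System.out.println("Intent: " + result.getMatch().getIntent().getDisplayName());
// }
// ```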
// Represents the natural language text to be processed.
message TextInput {
// Required. The UTF-8 encoded natural language text to be processed. Text length must
// not exceed 256 characters.
string text = 1 [(google.api.field_behavior) = REQUIRED];
}
// Represents the intent to trigger programmatically rather than as a result of
// natural language processing.
message IntentInput {
// Required. The unique identifier of the intent.
// Format: `projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/intents/<Intent ID>`.
string intent = 1 [
(google.api.field_behavior) = REQUIRED,
(google.api.resource_reference) = {
type: "dialogflow.googleapis.com/Intent"
}
];
}
// Represents the natural speech audio to be processed.
message AudioInput {
// Required. Instructs the speech recognizer how to process the speech audio.
InputAudioConfig config = 1 [(google.api.field_behavior) = REQUIRED];
// The natural language speech audio to be processed.
// A single request can contain up to 1 minute of speech audio data.
// The [transcribed text][google.cloud.dialogflow.cx.v3.QueryResult.transcript] cannot contain more than 256
// bytes.
//
// For non-streaming audio detect intent, both `config` and `audio` must be
// provided.
// For streaming audio detect intent, `config` must be provided in
// the first request and `audio` must be provided in all following requests.
bytes audio = 2;
}
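// Example: a Java sketch of a complete audio QueryInput for non-streaming
// detect intent, where both `config` and `audio` must be provided; the
// encoding, sample rate, and `audioBytes` are illustrative.
// ```
// import com.google.protobuf.ByteString;
//
// QueryInput queryInput =
//     QueryInput.newBuilder()
//         .setAudio(
//             AudioInput.newBuilder()
//                 .setConfig(InputAudioConfig.newBuilder()
//                     .setAudioEncoding(AudioEncoding.AUDIO_ENCODING_LINEAR_16)
//                     .setSampleRateHertz(16000))
//                 .setAudio(ByteString.copyFrom(audioBytes))) // <= 1 minute
//         .setLanguageCode("en")
//         .build();
// ```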
// Represents the event to trigger.
message EventInput {
// Name of the event.
string event = 1;
}
// Represents the input for a DTMF event.
message DtmfInput {
// The DTMF digits.
string digits = 1;
// The finish digit (if any).
string finish_digit = 2;
}
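// Example: a Java sketch of the remaining QueryInput variants (intent,
// event, and DTMF); the identifiers are illustrative.
// ```
// // Trigger a specific intent directly, bypassing NLU matching.
// QueryInput byIntent = QueryInput.newBuilder()
//     .setIntent(IntentInput.newBuilder().setIntent(
//         "projects/<Project ID>/locations/<Location ID>"
//             + "/agents/<Agent ID>/intents/<Intent ID>"))
//     .setLanguageCode("en").build();
// // Fire a named event.
// QueryInput byEvent = QueryInput.newBuilder()
//     .setEvent(EventInput.newBuilder().setEvent("custom-event"))
//     .setLanguageCode("en").build();
// // Hand over touch-tone digits, ending with the finish digit.
// QueryInput byDtmf = QueryInput.newBuilder()
//     .setDtmf(DtmfInput.newBuilder().setDigits("123").setFinishDigit("#"))
//     .setLanguageCode("en").build();
// ```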
// Represents one match result of [MatchIntent][].
message Match {
// Type of a Match.
enum MatchType {
// Not specified. Should never be used.
MATCH_TYPE_UNSPECIFIED = 0;
// The query was matched to an intent.
INTENT = 1;
// The query directly triggered an intent.
DIRECT_INTENT = 2;
// The query was used for parameter filling.
PARAMETER_FILLING = 3;
// No match was found for the query.
NO_MATCH = 4;
// Indicates an empty query.
NO_INPUT = 5;
// The query directly triggered an event.
EVENT = 6;
}
// The [Intent][google.cloud.dialogflow.cx.v3.Intent] that matched the query. Only some fields are filled in
// this message, including but not limited to: `name` and `display_name`. Only
// filled for the [`INTENT`][google.cloud.dialogflow.cx.v3.Match.MatchType] match type.
Intent intent = 1;
// The event that matched the query. Only filled for
// [`EVENT`][google.cloud.dialogflow.cx.v3.Match.MatchType] match type.
string event = 6;
// The collection of parameters extracted from the query.
//
// Depending on your protocol or client library language, this is a
// map, associative array, symbol table, dictionary, or JSON object
// composed of a collection of (MapKey, MapValue) pairs:
//
// - MapKey type: string
// - MapKey value: parameter name
// - MapValue type:
//   - If parameter's entity type is a composite entity: map
//   - Else: depending on parameter value type, could be one of string,
//     number, boolean, null, list or map
// - MapValue value:
//   - If parameter's entity type is a composite entity:
//     map from composite entity property names to property values
//   - Else: parameter value
google.protobuf.Struct parameters = 2;
// Final text input which was matched during MatchIntent. This value can be
// different from the original input sent in the request because of spelling
// correction or other processing.
string resolved_input = 3;
// Type of this [Match][google.cloud.dialogflow.cx.v3.Match].
MatchType match_type = 4;
// The confidence of this match. Values range from 0.0 (completely uncertain)
// to 1.0 (completely certain).
// This value is for informational purpose only and is only used to help match
// the best intent within the classification threshold. This value may change
// for the same end-user expression at any time due to a model retraining or
// change in implementation.
float confidence = 5;
}
// Request of [MatchIntent][].
message MatchIntentRequest {
// Required. The name of the session this query is sent to.
// Format: `projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/sessions/<Session ID>` or
// `projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/environments/<Environment ID>/sessions/<Session ID>`.
// If `Environment ID` is not specified, we assume default 'draft'
// environment.
// It's up to the API caller to choose an appropriate `Session ID`. It can be
// a random number or some type of session identifier (preferably hashed).
// The length of the `Session ID` must not exceed 36 characters.
//
// For more information, see the [sessions
// guide](https://cloud.google.com/dialogflow/cx/docs/concept/session).
string session = 1 [
(google.api.field_behavior) = REQUIRED,
(google.api.resource_reference) = {
type: "dialogflow.googleapis.com/Session"
}
];
// The parameters of this query.
QueryParameters query_params = 2;
// Required. The input specification.
QueryInput query_input = 3 [(google.api.field_behavior) = REQUIRED];
}
// Response of [MatchIntent][].
message MatchIntentResponse {
// The original conversational query.
oneof query {
// If [natural language text][google.cloud.dialogflow.cx.v3.TextInput] was provided as input, this field
// will contain a copy of the text.
string text = 1;
// If an [intent][google.cloud.dialogflow.cx.v3.IntentInput] was provided as input, this field will
// contain a copy of the intent identifier.
// Format: `projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/intents/<Intent ID>`.
string trigger_intent = 2 [(google.api.resource_reference) = {
type: "dialogflow.googleapis.com/Intent"
}];
// If [natural language speech audio][google.cloud.dialogflow.cx.v3.AudioInput] was provided as input,
// this field will contain the transcript for the audio.
string transcript = 3;
// If an [event][google.cloud.dialogflow.cx.v3.EventInput] was provided as input, this field will
// contain a copy of the event name.
string trigger_event = 6;
}
// Match results, if more than one, ordered by descending confidence that
// the particular intent matches the query.
repeated Match matches = 4;
// The current [Page][google.cloud.dialogflow.cx.v3.Page]. Only some fields are filled in this message,
// including but not limited to `name` and `display_name`.
Page current_page = 5;
}
// Request of [FulfillIntent][]
message FulfillIntentRequest {
// Must be the same as the corresponding MatchIntent request; otherwise the
// behavior is undefined.
MatchIntentRequest match_intent_request = 1;
// The matched intent/event to fulfill.
Match match = 2;
// Instructs the speech synthesizer how to generate output audio.
OutputAudioConfig output_audio_config = 3;
}
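// Example: a Java sketch of the MatchIntent -> FulfillIntent handshake that
// the comments above require; `sessions` is the companion client library's
// SessionsClient (an assumption beyond this PROTO artifact).
// ```
// MatchIntentRequest matchRequest =
//     MatchIntentRequest.newBuilder()
//         .setSession("projects/<Project ID>/locations/<Location ID>"
//             + "/agents/<Agent ID>/sessions/<Session ID>")
//         .setQueryInput(
//             QueryInput.newBuilder()
//                 .setText(TextInput.newBuilder().setText("book a flight"))
//                 .setLanguageCode("en"))
//         .build();
// MatchIntentResponse matched = sessions.matchIntent(matchRequest);
// // Reuse the exact same MatchIntentRequest, plus the chosen match;
// // matches are ordered by descending confidence.
// FulfillIntentResponse fulfilled = sessions.fulfillIntent(
//     FulfillIntentRequest.newBuilder()
//         .setMatchIntentRequest(matchRequest)
//         .setMatch(matched.getMatches(0))
//         .build());
// ```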
// Response of [FulfillIntent][]
message FulfillIntentResponse {
// Output only. The unique identifier of the response. It can be used to
// locate a response in the training example set or for reporting issues.
string response_id = 1;
// The result of the conversational query.
QueryResult query_result = 2;
// The audio data bytes encoded as specified in the request.
// Note: The output audio is generated based on the values of default platform
// text responses found in the
// [`query_result.response_messages`][google.cloud.dialogflow.cx.v3.QueryResult.response_messages] field. If
// multiple default text responses exist, they will be concatenated when
// generating audio. If no default platform text responses exist, the
// generated audio content will be empty.
//
// In some scenarios, multiple output audio fields may be present in the
// response structure. In these cases, only the top-most-level audio output
// has content.
bytes output_audio = 3;
// The config used by the speech synthesizer to generate the output audio.
OutputAudioConfig output_audio_config = 4;
}
// The result of sentiment analysis. Sentiment analysis inspects user input
// and identifies the prevailing subjective opinion, especially to determine a
// user's attitude as positive, negative, or neutral.
message SentimentAnalysisResult {
// Sentiment score between -1.0 (negative sentiment) and 1.0 (positive
// sentiment).
float score = 1;
// A non-negative number in the [0, +inf) range, which represents the absolute
// magnitude of sentiment, regardless of score (positive or negative).
float magnitude = 2;
}