// google/cloud/dialogflow/v2beta1/participant.proto
// Copyright 2024 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package google.cloud.dialogflow.v2beta1;
import "google/api/annotations.proto";
import "google/api/client.proto";
import "google/api/field_behavior.proto";
import "google/api/resource.proto";
import "google/cloud/dialogflow/v2beta1/audio_config.proto";
import "google/cloud/dialogflow/v2beta1/session.proto";
import "google/protobuf/field_mask.proto";
import "google/protobuf/struct.proto";
import "google/protobuf/timestamp.proto";
import "google/rpc/status.proto";
option cc_enable_arenas = true;
option csharp_namespace = "Google.Cloud.Dialogflow.V2Beta1";
option go_package = "cloud.google.com/go/dialogflow/apiv2beta1/dialogflowpb;dialogflowpb";
option java_multiple_files = true;
option java_outer_classname = "ParticipantProto";
option java_package = "com.google.cloud.dialogflow.v2beta1";
option objc_class_prefix = "DF";
// Service for managing
// [Participants][google.cloud.dialogflow.v2beta1.Participant].
service Participants {
option (google.api.default_host) = "dialogflow.googleapis.com";
option (google.api.oauth_scopes) =
"https://www.googleapis.com/auth/cloud-platform,"
"https://www.googleapis.com/auth/dialogflow";
// Creates a new participant in a conversation.
rpc CreateParticipant(CreateParticipantRequest) returns (Participant) {
option (google.api.http) = {
post: "/v2beta1/{parent=projects/*/conversations/*}/participants"
body: "participant"
additional_bindings {
post: "/v2beta1/{parent=projects/*/locations/*/conversations/*}/participants"
body: "participant"
}
};
option (google.api.method_signature) = "parent,participant";
}
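// A minimal sketch (not an official sample) of calling this RPC with the
// generated Java client from this package; it assumes application default
// credentials, and the project/conversation IDs are placeholders.
//
//   try (ParticipantsClient client = ParticipantsClient.create()) {
//     ConversationName parent = ConversationName.of("my-project", "my-conversation");
//     Participant participant =
//         Participant.newBuilder().setRole(Participant.Role.END_USER).build();
//     Participant created = client.createParticipant(parent, participant);
//     System.out.println(created.getName());
//   }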
// Retrieves a conversation participant.
rpc GetParticipant(GetParticipantRequest) returns (Participant) {
option (google.api.http) = {
get: "/v2beta1/{name=projects/*/conversations/*/participants/*}"
additional_bindings {
get: "/v2beta1/{name=projects/*/locations/*/conversations/*/participants/*}"
}
};
option (google.api.method_signature) = "name";
}
// Returns the list of all participants in the specified conversation.
rpc ListParticipants(ListParticipantsRequest)
returns (ListParticipantsResponse) {
option (google.api.http) = {
get: "/v2beta1/{parent=projects/*/conversations/*}/participants"
additional_bindings {
get: "/v2beta1/{parent=projects/*/locations/*/conversations/*}/participants"
}
};
option (google.api.method_signature) = "parent";
}
// Updates the specified participant.
rpc UpdateParticipant(UpdateParticipantRequest) returns (Participant) {
option (google.api.http) = {
patch: "/v2beta1/{participant.name=projects/*/conversations/*/participants/*}"
body: "participant"
additional_bindings {
patch: "/v2beta1/{participant.name=projects/*/locations/*/conversations/*/participants/*}"
body: "participant"
}
};
option (google.api.method_signature) = "participant,update_mask";
}
// Adds a text (for example, chat) or audio (for example, phone recording)
// message from a participant into the conversation.
//
// Note: Always use agent versions for production traffic
// sent to virtual agents. See [Versions and
// environments](https://cloud.google.com/dialogflow/es/docs/agents-versions).
rpc AnalyzeContent(AnalyzeContentRequest) returns (AnalyzeContentResponse) {
option (google.api.http) = {
post: "/v2beta1/{participant=projects/*/conversations/*/participants/*}:analyzeContent"
body: "*"
additional_bindings {
post: "/v2beta1/{participant=projects/*/locations/*/conversations/*/participants/*}:analyzeContent"
body: "*"
}
};
option (google.api.method_signature) = "participant,text_input";
option (google.api.method_signature) = "participant,audio_input";
option (google.api.method_signature) = "participant,event_input";
}
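// A minimal sketch (not an official sample) of the text path of this RPC
// using the generated Java client; resource IDs and the message text are
// placeholders.
//
//   try (ParticipantsClient client = ParticipantsClient.create()) {
//     ParticipantName participant =
//         ParticipantName.of("my-project", "my-conversation", "my-participant");
//     TextInput text =
//         TextInput.newBuilder().setText("I need help").setLanguageCode("en-US").build();
//     AnalyzeContentResponse response = client.analyzeContent(participant, text);
//     System.out.println(response.getReplyText());
//   }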
// Adds a text (e.g., chat) or audio (e.g., phone recording) message from a
// participant into the conversation.
// Note: This method is only available through the gRPC API (not REST).
//
// The top-level message sent to the client by the server is
// `StreamingAnalyzeContentResponse`. Multiple response messages can be
// returned in order. The first one or more messages contain the
// `recognition_result` field. Each result represents a more complete
// transcript of what the user said. The next message contains the
// `reply_text` field, and potentially the `reply_audio` and/or the
// `automated_agent_reply` fields.
//
// Note: Always use agent versions for production traffic
// sent to virtual agents. See [Versions and
// environments](https://cloud.google.com/dialogflow/es/docs/agents-versions).
rpc StreamingAnalyzeContent(stream StreamingAnalyzeContentRequest)
returns (stream StreamingAnalyzeContentResponse) {}
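// A minimal sketch (not an official sample) of the message ordering described
// above, using the generated Java client's bidi streaming callable. The
// participant name is a placeholder and `audioChunks` is an assumed source of
// LINEAR16 audio bytes.
//
//   try (ParticipantsClient client = ParticipantsClient.create()) {
//     BidiStream<StreamingAnalyzeContentRequest, StreamingAnalyzeContentResponse> stream =
//         client.streamingAnalyzeContentCallable().call();
//     // First message: participant and config only, no input.
//     stream.send(
//         StreamingAnalyzeContentRequest.newBuilder()
//             .setParticipant(
//                 "projects/my-project/conversations/my-conversation/participants/my-participant")
//             .setAudioConfig(
//                 InputAudioConfig.newBuilder()
//                     .setAudioEncoding(AudioEncoding.AUDIO_ENCODING_LINEAR_16)
//                     .setSampleRateHertz(16000)
//                     .setLanguageCode("en-US")
//                     .build())
//             .build());
//     // Subsequent messages: audio only.
//     for (byte[] chunk : audioChunks) {
//       stream.send(
//           StreamingAnalyzeContentRequest.newBuilder()
//               .setInputAudio(ByteString.copyFrom(chunk))
//               .build());
//     }
//     stream.closeSend();  // half-close after all input has been sent
//     for (StreamingAnalyzeContentResponse response : stream) {
//       System.out.println(response.getReplyText());
//     }
//   }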
// Gets suggested articles for a participant based on specific historical
// messages.
//
// Note that
// [ListSuggestions][google.cloud.dialogflow.v2beta1.Participants.ListSuggestions]
// will only list the auto-generated suggestions, while
// [CompileSuggestion][google.cloud.dialogflow.v2beta1.Participants.CompileSuggestion]
// will try to compile a suggestion based on the provided conversation
// context in real time.
rpc SuggestArticles(SuggestArticlesRequest)
returns (SuggestArticlesResponse) {
option (google.api.http) = {
post: "/v2beta1/{parent=projects/*/conversations/*/participants/*}/suggestions:suggestArticles"
body: "*"
additional_bindings {
post: "/v2beta1/{parent=projects/*/locations/*/conversations/*/participants/*}/suggestions:suggestArticles"
body: "*"
}
};
option (google.api.method_signature) = "parent";
}
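// A minimal sketch (not an official sample) of fetching article suggestions
// for the latest conversation message with the generated Java client;
// resource IDs are placeholders. SuggestFaqAnswers and SuggestSmartReplies
// follow the same shape.
//
//   try (ParticipantsClient client = ParticipantsClient.create()) {
//     ParticipantName parent =
//         ParticipantName.of("my-project", "my-conversation", "my-participant");
//     SuggestArticlesResponse response = client.suggestArticles(parent);
//     for (ArticleAnswer answer : response.getArticleAnswersList()) {
//       System.out.println(answer.getTitle() + " " + answer.getUri());
//     }
//   }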
// Gets suggested FAQ answers for a participant based on specific historical
// messages.
rpc SuggestFaqAnswers(SuggestFaqAnswersRequest)
returns (SuggestFaqAnswersResponse) {
option (google.api.http) = {
post: "/v2beta1/{parent=projects/*/conversations/*/participants/*}/suggestions:suggestFaqAnswers"
body: "*"
additional_bindings {
post: "/v2beta1/{parent=projects/*/locations/*/conversations/*/participants/*}/suggestions:suggestFaqAnswers"
body: "*"
}
};
option (google.api.method_signature) = "parent";
}
// Gets smart replies for a participant based on specific historical
// messages.
rpc SuggestSmartReplies(SuggestSmartRepliesRequest)
returns (SuggestSmartRepliesResponse) {
option (google.api.http) = {
post: "/v2beta1/{parent=projects/*/conversations/*/participants/*}/suggestions:suggestSmartReplies"
body: "*"
additional_bindings {
post: "/v2beta1/{parent=projects/*/locations/*/conversations/*/participants/*}/suggestions:suggestSmartReplies"
body: "*"
}
};
option (google.api.method_signature) = "parent";
}
// Gets knowledge assist suggestions based on historical messages.
rpc SuggestKnowledgeAssist(SuggestKnowledgeAssistRequest)
returns (SuggestKnowledgeAssistResponse) {
option (google.api.http) = {
post: "/v2beta1/{parent=projects/*/conversations/*/participants/*}/suggestions:suggestKnowledgeAssist"
body: "*"
additional_bindings {
post: "/v2beta1/{parent=projects/*/locations/*/conversations/*/participants/*}/suggestions:suggestKnowledgeAssist"
body: "*"
}
};
}
// Deprecated: Use inline suggestion, event-based suggestion, or the
// Suggestion* APIs instead.
// See
// [HumanAgentAssistantConfig.name][google.cloud.dialogflow.v2beta1.HumanAgentAssistantConfig.name]
// for more details. Removal Date: 2020-09-01.
//
// Retrieves suggestions for live agents.
//
// This method should be used by human agent client software to fetch
// auto-generated suggestions in real time, while the conversation with an
// end user is in progress. The functionality is implemented in terms of the
// [list
// pagination](https://cloud.google.com/apis/design/design_patterns#list_pagination)
// design pattern. The client app should use the `next_page_token` field
// to fetch the next batch of suggestions. `suggestions` are sorted by
// `create_time` in descending order.
// To fetch the latest suggestion, set `page_size` to 1.
// To fetch new suggestions without duplication, send a request with the
// filter `create_time_epoch_microseconds > [first item's create_time of
// previous request]` and an empty `page_token`.
rpc ListSuggestions(ListSuggestionsRequest)
returns (ListSuggestionsResponse) {
option deprecated = true;
option (google.api.http) = {
get: "/v2beta1/{parent=projects/*/conversations/*/participants/*}/suggestions"
};
}
// Deprecated. Use
// [SuggestArticles][google.cloud.dialogflow.v2beta1.Participants.SuggestArticles]
// and
// [SuggestFaqAnswers][google.cloud.dialogflow.v2beta1.Participants.SuggestFaqAnswers]
// instead.
//
// Gets suggestions for a participant based on specific historical
// messages.
//
// Note that
// [ListSuggestions][google.cloud.dialogflow.v2beta1.Participants.ListSuggestions]
// will only list the auto-generated suggestions, while
// [CompileSuggestion][google.cloud.dialogflow.v2beta1.Participants.CompileSuggestion]
// will try to compile a suggestion based on the provided conversation
// context in real time.
rpc CompileSuggestion(CompileSuggestionRequest)
returns (CompileSuggestionResponse) {
option deprecated = true;
option (google.api.http) = {
post: "/v2beta1/{parent=projects/*/conversations/*/participants/*}/suggestions:compile"
body: "*"
};
}
}
// Represents a conversation participant (human agent, virtual agent, end-user).
message Participant {
option (google.api.resource) = {
type: "dialogflow.googleapis.com/Participant"
pattern: "projects/{project}/conversations/{conversation}/participants/{participant}"
pattern: "projects/{project}/locations/{location}/conversations/{conversation}/participants/{participant}"
};
// Enumeration of the roles a participant can play in a conversation.
enum Role {
// Participant role not set.
ROLE_UNSPECIFIED = 0;
// Participant is a human agent.
HUMAN_AGENT = 1;
// Participant is an automated agent, such as a Dialogflow agent.
AUTOMATED_AGENT = 2;
// Participant is an end user that has called or chatted with
// Dialogflow services.
END_USER = 3;
}
// Optional. The unique identifier of this participant.
// Format: `projects/<Project ID>/locations/<Location
// ID>/conversations/<Conversation ID>/participants/<Participant ID>`.
string name = 1 [(google.api.field_behavior) = OPTIONAL];
// Immutable. The role this participant plays in the conversation. This field
// must be set during participant creation and is then immutable.
Role role = 2 [(google.api.field_behavior) = IMMUTABLE];
// Optional. Obfuscated user id that should be associated with the created
// participant.
//
// You can specify a user id as follows:
//
// 1. If you set this field in
// [CreateParticipantRequest][google.cloud.dialogflow.v2beta1.CreateParticipantRequest.participant]
// or
// [UpdateParticipantRequest][google.cloud.dialogflow.v2beta1.UpdateParticipantRequest.participant],
// Dialogflow associates the obfuscated user id with the participant.
//
// 2. If you set this field in
// [AnalyzeContent][google.cloud.dialogflow.v2beta1.AnalyzeContentRequest.obfuscated_external_user_id]
// or
// [StreamingAnalyzeContent][google.cloud.dialogflow.v2beta1.StreamingAnalyzeContentRequest.obfuscated_external_user_id],
// Dialogflow will update
// [Participant.obfuscated_external_user_id][google.cloud.dialogflow.v2beta1.Participant.obfuscated_external_user_id].
//
// Dialogflow uses this user id for billing and measurement. If a user with
// the same obfuscated_external_user_id is created in a later conversation,
// Dialogflow will know it's the same user.
//
// Dialogflow also uses this user id for Agent Assist suggestion
// personalization. For example, Dialogflow can use it to provide personalized
// smart reply suggestions for this user.
//
// Note:
//
// * Please never pass raw user ids to Dialogflow. Always obfuscate your user
// id first.
// * Dialogflow only accepts a UTF-8 encoded string, e.g., a hex digest of a
// hash function like SHA-512.
// * The length of the user id must be <= 256 characters.
string obfuscated_external_user_id = 7
[(google.api.field_behavior) = OPTIONAL];
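// A minimal sketch (plain JDK, no Dialogflow dependency) of producing such an
// obfuscated id: a SHA-512 hex digest of the raw user id. `obfuscate` is a
// hypothetical helper name.
//
//   static String obfuscate(String rawUserId) throws java.security.NoSuchAlgorithmException {
//     java.security.MessageDigest digest = java.security.MessageDigest.getInstance("SHA-512");
//     byte[] hash = digest.digest(rawUserId.getBytes(java.nio.charset.StandardCharsets.UTF_8));
//     StringBuilder hex = new StringBuilder(hash.length * 2);
//     for (byte b : hash) {
//       hex.append(String.format("%02x", b));  // 128 hex chars, well under the 256 limit
//     }
//     return hex.toString();
//   }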
// Optional. Key-value filters on the metadata of documents returned by
// article suggestion. If specified, article suggestion only returns suggested
// documents that match all filters in their
// [Document.metadata][google.cloud.dialogflow.v2beta1.Document.metadata].
// Multiple values for a metadata key should be concatenated with commas. For
// example, filters to match all documents that have 'US' or 'CA' in their
// market metadata values and 'agent' in their user metadata values will be
// ```
// documents_metadata_filters {
// key: "market"
// value: "US,CA"
// }
// documents_metadata_filters {
// key: "user"
// value: "agent"
// }
// ```
map<string, string> documents_metadata_filters = 8
[(google.api.field_behavior) = OPTIONAL];
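// A minimal sketch of setting the filters above with the generated Java
// builder (map fields get `put*` methods):
//
//   Participant participant =
//       Participant.newBuilder()
//           .putDocumentsMetadataFilters("market", "US,CA")
//           .putDocumentsMetadataFilters("user", "agent")
//           .build();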
}
// Represents a message posted into a conversation.
message Message {
option (google.api.resource) = {
type: "dialogflow.googleapis.com/Message"
pattern: "projects/{project}/conversations/{conversation}/messages/{message}"
pattern: "projects/{project}/locations/{location}/conversations/{conversation}/messages/{message}"
};
// Optional. The unique identifier of the message.
// Format: `projects/<Project ID>/locations/<Location
// ID>/conversations/<Conversation ID>/messages/<Message ID>`.
string name = 1 [(google.api.field_behavior) = OPTIONAL];
// Required. The message content.
string content = 2 [(google.api.field_behavior) = REQUIRED];
// Optional. Automated agent responses.
repeated ResponseMessage response_messages = 11
[(google.api.field_behavior) = OPTIONAL];
// Optional. The message language.
// This should be a [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
// language tag. Example: "en-US".
string language_code = 3 [(google.api.field_behavior) = OPTIONAL];
// Output only. The participant that sends this message.
string participant = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. The role of the participant.
Participant.Role participant_role = 5
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. The time when the message was created in Contact Center AI.
google.protobuf.Timestamp create_time = 6
[(google.api.field_behavior) = OUTPUT_ONLY];
// Optional. The time when the message was sent.
google.protobuf.Timestamp send_time = 9
[(google.api.field_behavior) = OPTIONAL];
// Output only. The annotation for the message.
MessageAnnotation message_annotation = 7
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. The sentiment analysis result for the message.
SentimentAnalysisResult sentiment_analysis = 8
[(google.api.field_behavior) = OUTPUT_ONLY];
}
// The request message for
// [Participants.CreateParticipant][google.cloud.dialogflow.v2beta1.Participants.CreateParticipant].
message CreateParticipantRequest {
// Required. Resource identifier of the conversation adding the participant.
// Format: `projects/<Project ID>/locations/<Location
// ID>/conversations/<Conversation ID>`.
string parent = 1 [
(google.api.field_behavior) = REQUIRED,
(google.api.resource_reference) = {
child_type: "dialogflow.googleapis.com/Participant"
}
];
// Required. The participant to create.
Participant participant = 2 [(google.api.field_behavior) = REQUIRED];
}
// The request message for
// [Participants.GetParticipant][google.cloud.dialogflow.v2beta1.Participants.GetParticipant].
message GetParticipantRequest {
// Required. The name of the participant. Format:
// `projects/<Project ID>/locations/<Location ID>/conversations/<Conversation
// ID>/participants/<Participant ID>`.
string name = 1 [
(google.api.field_behavior) = REQUIRED,
(google.api.resource_reference) = {
type: "dialogflow.googleapis.com/Participant"
}
];
}
// The request message for
// [Participants.ListParticipants][google.cloud.dialogflow.v2beta1.Participants.ListParticipants].
message ListParticipantsRequest {
// Required. The conversation to list all participants from.
// Format: `projects/<Project ID>/locations/<Location
// ID>/conversations/<Conversation ID>`.
string parent = 1 [
(google.api.field_behavior) = REQUIRED,
(google.api.resource_reference) = {
child_type: "dialogflow.googleapis.com/Participant"
}
];
// Optional. The maximum number of items to return in a single page. By
// default 100 and at most 1000.
int32 page_size = 2 [(google.api.field_behavior) = OPTIONAL];
// Optional. The next_page_token value returned from a previous list request.
string page_token = 3 [(google.api.field_behavior) = OPTIONAL];
}
// The response message for
// [Participants.ListParticipants][google.cloud.dialogflow.v2beta1.Participants.ListParticipants].
message ListParticipantsResponse {
// The list of participants. There is a maximum number of items
// returned based on the page_size field in the request.
repeated Participant participants = 1;
// Token to retrieve the next page of results or empty if there are no
// more results in the list.
string next_page_token = 2;
}
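// A minimal sketch (not an official sample): the generated Java client wraps
// page_size/page_token handling in a paged response, so all participants can
// be iterated directly; resource IDs are placeholders.
//
//   try (ParticipantsClient client = ParticipantsClient.create()) {
//     ConversationName parent = ConversationName.of("my-project", "my-conversation");
//     for (Participant p : client.listParticipants(parent).iterateAll()) {
//       System.out.println(p.getName() + " " + p.getRole());
//     }
//   }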
// The request message for
// [Participants.UpdateParticipant][google.cloud.dialogflow.v2beta1.Participants.UpdateParticipant].
message UpdateParticipantRequest {
// Required. The participant to update.
Participant participant = 1 [(google.api.field_behavior) = REQUIRED];
// Required. The mask to specify which fields to update.
google.protobuf.FieldMask update_mask = 2
[(google.api.field_behavior) = REQUIRED];
}
// Represents the natural language speech audio to be processed.
message AudioInput {
// Required. Instructs the speech recognizer how to process the speech audio.
InputAudioConfig config = 1;
// Required. The natural language speech audio to be processed.
// A single request can contain up to 2 minutes of speech audio data.
// The transcribed text cannot contain more than 256 bytes for virtual agent
// interactions.
bytes audio = 2;
}
// Represents the natural language speech audio to be played to the end user.
message OutputAudio {
// Required. Instructs the speech synthesizer how to generate the speech
// audio.
OutputAudioConfig config = 1;
// Required. The natural language speech audio.
bytes audio = 2;
}
// Represents a response from an automated agent.
message AutomatedAgentReply {
// Represents different automated agent reply types.
enum AutomatedAgentReplyType {
// Not specified. This should never happen.
AUTOMATED_AGENT_REPLY_TYPE_UNSPECIFIED = 0;
// Partial reply. For example, aggregated responses in a `Fulfillment` that
// enables `return_partial_response` can be returned as a partial reply.
// WARNING: a partial reply is not eligible for barge-in.
PARTIAL = 1;
// Final reply.
FINAL = 2;
}
// Required.
oneof response {
// Response of the Dialogflow
// [Sessions.DetectIntent][google.cloud.dialogflow.v2beta1.Sessions.DetectIntent]
// call.
DetectIntentResponse detect_intent_response = 1;
}
// Response messages from the automated agent.
repeated ResponseMessage response_messages = 3;
// Info on the query match for the automated agent response.
oneof match {
// Name of the intent if an intent is matched for the query.
// For a V2 query, the value format is `projects/<Project ID>/locations/
// <Location ID>/agent/intents/<Intent ID>`.
// For a V3 query, the value format is `projects/<Project ID>/locations/
// <Location ID>/agents/<Agent ID>/intents/<Intent ID>`.
string intent = 4 [(google.api.resource_reference) = {
type: "dialogflow.googleapis.com/Intent"
}];
// Event name if an event is triggered for the query.
string event = 5;
}
// The confidence of the match. Values range from 0.0 (completely uncertain)
// to 1.0 (completely certain).
// This value is for informational purposes only and is only used to help match
// the best intent within the classification threshold. This value may change
// for the same end-user expression at any time due to a model retraining or
// change in implementation.
float match_confidence = 9;
// The collection of current parameters at the time of this response.
google.protobuf.Struct parameters = 10;
// The collection of current Dialogflow CX agent session parameters at the
// time of this response.
// Deprecated: Use `parameters` instead.
google.protobuf.Struct cx_session_parameters = 6 [deprecated = true];
// AutomatedAgentReply type.
AutomatedAgentReplyType automated_agent_reply_type = 7;
// Indicates whether the partial automated agent reply is interruptible when a
// later reply message arrives. For example, if the agent specified some music
// as a partial response, it can be canceled.
bool allow_cancellation = 8;
// The unique identifier of the current Dialogflow CX conversation page.
// Format: `projects/<Project ID>/locations/<Location ID>/agents/<Agent
// ID>/flows/<Flow ID>/pages/<Page ID>`.
string cx_current_page = 11;
// The auth code for accessing Call Companion UI.
bytes call_companion_auth_code = 12;
}
// Represents the selection of a suggestion.
message SuggestionInput {
// Required. The ID of a suggestion selected by the human agent.
// The suggestion(s) were generated in a previous call to
// request Dialogflow assist.
// The format is:
// `projects/<Project ID>/locations/<Location ID>/answerRecords/<Answer Record
// ID>` where <Answer Record ID> is an alphanumeric string.
string answer_record = 1;
// Optional. If the customer edited the suggestion before using it, include
// the revised text here.
TextInput text_override = 2;
// In Dialogflow assist for v3, the user can submit a form by sending
// a [SuggestionInput][google.cloud.dialogflow.v2beta1.SuggestionInput]. The
// form is uniquely determined by the
// [answer_record][google.cloud.dialogflow.v2beta1.SuggestionInput.answer_record]
// field, which identifies a v3
// [QueryResult][google.cloud.dialogflow.v3alpha1.QueryResult] containing the
// current [page][google.cloud.dialogflow.v3alpha1.Page]. The form parameters
// are specified via the
// [parameters][google.cloud.dialogflow.v2beta1.SuggestionInput.parameters]
// field.
//
// Depending on your protocol or client library language, this is a
// map, associative array, symbol table, dictionary, or JSON object
// composed of a collection of (MapKey, MapValue) pairs:
//
// * MapKey type: string
// * MapKey value: parameter name
// * MapValue type: If parameter's entity type is a composite entity then use
// map, otherwise, depending on the parameter value type, it could be one of
// string, number, boolean, null, list or map.
// * MapValue value: If parameter's entity type is a composite entity then use
// map from composite entity property names to property values, otherwise,
// use parameter value.
google.protobuf.Struct parameters = 4;
// The intent to be triggered on the V3 agent.
IntentInput intent_input = 6;
}
// Represents the intent to trigger programmatically rather than as a result of
// natural language processing. The intent input is only used for the V3
// agent.
message IntentInput {
// Required. The unique identifier of the intent in the V3 agent.
// Format: `projects/<Project ID>/locations/<Location ID>/agents/<Agent
// ID>/intents/<Intent ID>`.
string intent = 1 [(google.api.field_behavior) = REQUIRED];
// Required. The language of this conversational query. See [Language
// Support](https://cloud.google.com/dialogflow/docs/reference/language)
// for a list of the currently supported language codes.
string language_code = 3 [(google.api.field_behavior) = REQUIRED];
}
// The type of Human Agent Assistant API suggestion to perform, and the maximum
// number of results to return for that type. Multiple `Feature` objects can
// be specified in the `features` list.
message SuggestionFeature {
// Defines the type of Human Agent Assistant feature.
enum Type {
// Unspecified feature type.
TYPE_UNSPECIFIED = 0;
// Run article suggestion model for chat.
ARTICLE_SUGGESTION = 1;
// Run FAQ model.
FAQ = 2;
// Run smart reply model for chat.
SMART_REPLY = 3;
// Run Dialogflow assist model for chat, which returns an automated agent
// response as a suggestion.
DIALOGFLOW_ASSIST = 4;
// Run conversation summarization model for chat.
CONVERSATION_SUMMARIZATION = 8;
// Run knowledge search with text input from the agent or a text-generated
// query.
KNOWLEDGE_SEARCH = 14;
// Run knowledge assist with automatic query generation.
KNOWLEDGE_ASSIST = 15;
}
// Type of Human Agent Assistant API feature to request.
Type type = 1;
}
// Represents the parameters of a human assist query.
message AssistQueryParameters {
// Key-value filters on the metadata of documents returned by article
// suggestion. If specified, article suggestion only returns suggested
// documents that match all filters in their
// [Document.metadata][google.cloud.dialogflow.v2beta1.Document.metadata].
// Multiple values for a metadata key should be concatenated with commas. For
// example, filters to match all documents that have 'US' or 'CA' in their
// market metadata values and 'agent' in their user metadata values will be
// ```
// documents_metadata_filters {
// key: "market"
// value: "US,CA"
// }
// documents_metadata_filters {
// key: "user"
// value: "agent"
// }
// ```
map<string, string> documents_metadata_filters = 1;
}
// The request message for
// [Participants.AnalyzeContent][google.cloud.dialogflow.v2beta1.Participants.AnalyzeContent].
message AnalyzeContentRequest {
// Required. The name of the participant this text comes from.
// Format: `projects/<Project ID>/locations/<Location
// ID>/conversations/<Conversation ID>/participants/<Participant ID>`.
string participant = 1 [
(google.api.field_behavior) = REQUIRED,
(google.api.resource_reference) = {
type: "dialogflow.googleapis.com/Participant"
}
];
// Required. The input content.
oneof input {
// The natural language text to be processed.
TextInput text_input = 6;
// The natural language speech audio to be processed.
AudioInput audio_input = 7;
// An input event to send to Dialogflow.
EventInput event_input = 8;
// An input representing the selection of a suggestion.
SuggestionInput suggestion_input = 12;
// The intent to be triggered on the V3 agent.
IntentInput intent_input = 13;
}
// Speech synthesis configuration.
// The speech synthesis settings for a virtual agent that may be configured
// for the associated conversation profile are not used when calling
// AnalyzeContent. If this configuration is not supplied, speech synthesis
// is disabled.
OutputAudioConfig reply_audio_config = 5;
// Parameters for a Dialogflow virtual-agent query.
QueryParameters query_params = 9;
// Parameters for a human assist query.
AssistQueryParameters assist_query_params = 14;
// Additional parameters to be put into Dialogflow CX session parameters. To
// remove a parameter from the session, clients should explicitly set the
// parameter value to null.
//
// Note: this field should only be used if you are connecting to a Dialogflow
// CX agent.
google.protobuf.Struct cx_parameters = 18;
// The unique identifier of the CX page to override the `current_page` in the
// session.
// Format: `projects/<Project ID>/locations/<Location ID>/agents/<Agent
// ID>/flows/<Flow ID>/pages/<Page ID>`.
//
// If `cx_current_page` is specified, the previous state of the session will
// be ignored by Dialogflow CX, including the [previous
// page][QueryResult.current_page] and the [previous session
// parameters][QueryResult.parameters]. In most cases, `cx_current_page` and
// `cx_parameters` should be configured together to direct a session to a
// specific state.
//
// Note: this field should only be used if you are connecting to a Dialogflow
// CX agent.
string cx_current_page = 20;
// Optional. The send time of the message from the end user's or human agent's
// perspective. It is used for identifying the same message under one
// participant.
//
// Given two messages under the same participant:
// * If the send times are different, regardless of whether the contents of
// the messages are exactly the same, the conversation will regard them as
// two distinct messages sent by the participant.
// * If the send times are the same, regardless of whether the contents of
// the messages are exactly the same, the conversation will regard them as
// the same message, and ignore the message received later.
//
// If the value is not provided, a new request will always be regarded as a
// new message without any de-duplication.
google.protobuf.Timestamp message_send_time = 10;
// A unique identifier for this request. Restricted to 36 ASCII characters.
// A random UUID is recommended.
// This request is only idempotent if a `request_id` is provided.
string request_id = 11;
}
// The message in the response that indicates the parameters of DTMF.
message DtmfParameters {
// Indicates whether DTMF input can be handled in the next request.
bool accepts_dtmf_input = 1;
}
// The response message for
// [Participants.AnalyzeContent][google.cloud.dialogflow.v2beta1.Participants.AnalyzeContent].
message AnalyzeContentResponse {
// Output only. The output text content.
// This field is set if the automated agent responded with text to show to
// the user.
string reply_text = 1;
// Optional. The audio data bytes encoded as specified in the request.
// This field is set if:
//
// - `reply_audio_config` was specified in the request, or
// - The automated agent responded with audio to play to the user. In such
// case, `reply_audio.config` contains settings used to synthesize the
// speech.
//
// In some scenarios, multiple output audio fields may be present in the
// response structure. In these cases, only the top-most-level audio output
// has content.
OutputAudio reply_audio = 2;
// Optional. Only set if a Dialogflow automated agent has responded.
// Note that: [AutomatedAgentReply.detect_intent_response.output_audio][]
// and [AutomatedAgentReply.detect_intent_response.output_audio_config][]
// are always empty, use
// [reply_audio][google.cloud.dialogflow.v2beta1.AnalyzeContentResponse.reply_audio]
// instead.
AutomatedAgentReply automated_agent_reply = 3;
// Output only. Message analyzed by CCAI.
Message message = 5;
// The suggestions for the most recent human agent. The order is the same as
// [HumanAgentAssistantConfig.SuggestionConfig.feature_configs][google.cloud.dialogflow.v2beta1.HumanAgentAssistantConfig.SuggestionConfig.feature_configs]
// of
// [HumanAgentAssistantConfig.human_agent_suggestion_config][google.cloud.dialogflow.v2beta1.HumanAgentAssistantConfig.human_agent_suggestion_config].
//
// Note that any failure of Agent Assist features will not lead to the overall
// failure of an AnalyzeContent API call. Instead, the features will
// fail silently with the error field set in the corresponding
// SuggestionResult.
repeated SuggestionResult human_agent_suggestion_results = 6;
// The suggestions for the end user. The order is the same as
// [HumanAgentAssistantConfig.SuggestionConfig.feature_configs][google.cloud.dialogflow.v2beta1.HumanAgentAssistantConfig.SuggestionConfig.feature_configs]
// of
// [HumanAgentAssistantConfig.end_user_suggestion_config][google.cloud.dialogflow.v2beta1.HumanAgentAssistantConfig.end_user_suggestion_config].
//
// As with human_agent_suggestion_results, any failure of Agent Assist
// features will not lead to the overall failure of an AnalyzeContent API
// call. Instead, the features will fail silently with the error field set in
// the corresponding SuggestionResult.
repeated SuggestionResult end_user_suggestion_results = 7;
// Indicates the parameters of DTMF.
DtmfParameters dtmf_parameters = 9;
}
// Defines the language used in the input text.
message InputTextConfig {
// Required. The language of this conversational query. See [Language
// Support](https://cloud.google.com/dialogflow/docs/reference/language)
// for a list of the currently supported language codes.
string language_code = 1;
}
// The top-level message sent by the client to the
// [Participants.StreamingAnalyzeContent][google.cloud.dialogflow.v2beta1.Participants.StreamingAnalyzeContent]
// method.
//
// Multiple request messages should be sent in order:
//
// 1. The first message must contain
// [participant][google.cloud.dialogflow.v2beta1.StreamingAnalyzeContentRequest.participant],
// [config][google.cloud.dialogflow.v2beta1.StreamingAnalyzeContentRequest.config]
// and optionally
// [query_params][google.cloud.dialogflow.v2beta1.StreamingAnalyzeContentRequest.query_params].
// If you want to receive an audio response, it should also contain
// [reply_audio_config][google.cloud.dialogflow.v2beta1.StreamingAnalyzeContentRequest.reply_audio_config].
// The message must not contain
// [input][google.cloud.dialogflow.v2beta1.StreamingAnalyzeContentRequest.input].
//
// 2. If
// [config][google.cloud.dialogflow.v2beta1.StreamingAnalyzeContentRequest.config]
// in the first message
// was set to
// [audio_config][google.cloud.dialogflow.v2beta1.StreamingAnalyzeContentRequest.audio_config],
// all subsequent messages must contain
// [input_audio][google.cloud.dialogflow.v2beta1.StreamingAnalyzeContentRequest.input_audio]
// to continue with Speech recognition. If you decide to switch to analyzing
// text input after you have already started Speech recognition, send a
// message with
// [StreamingAnalyzeContentRequest.input_text][google.cloud.dialogflow.v2beta1.StreamingAnalyzeContentRequest.input_text].
//
// However, note that:
//
// * Dialogflow will bill you for the audio so far.
// * Dialogflow discards all Speech recognition results in favor of the
// text input.
//
// 3. If
// [StreamingAnalyzeContentRequest.config][google.cloud.dialogflow.v2beta1.StreamingAnalyzeContentRequest.config]
// in the first message was set
// to
// [StreamingAnalyzeContentRequest.text_config][google.cloud.dialogflow.v2beta1.StreamingAnalyzeContentRequest.text_config],
// then the second message must contain only
// [input_text][google.cloud.dialogflow.v2beta1.StreamingAnalyzeContentRequest.input_text].
// Moreover, you must not send more than two messages.
//
// After you have sent all input, you must half-close or abort the request
// stream.
message StreamingAnalyzeContentRequest {
// Required. The name of the participant this text comes from.
// Format: `projects/<Project ID>/locations/<Location
// ID>/conversations/<Conversation ID>/participants/<Participant ID>`.
string participant = 1 [
(google.api.field_behavior) = REQUIRED,
(google.api.resource_reference) = {
type: "dialogflow.googleapis.com/Participant"
}
];
// Required. The input config.
oneof config {
// Instructs the speech recognizer how to process the speech audio.
InputAudioConfig audio_config = 2;
// The natural language text to be processed.
InputTextConfig text_config = 3;
}
// Speech synthesis configuration.
// The speech synthesis settings for a virtual agent that may be configured
// for the associated conversation profile are not used when calling
// StreamingAnalyzeContent. If this configuration is not supplied, speech
// synthesis is disabled.
OutputAudioConfig reply_audio_config = 4;
// Required. The input.
oneof input {
// The input audio content to be recognized. Must be sent if `audio_config`
// is set in the first message. The complete audio over all streaming
// messages must not exceed 1 minute.
bytes input_audio = 5;
// The UTF-8 encoded natural language text to be processed. Must be sent if
// `text_config` is set in the first message. Text length must not exceed
// 256 bytes for virtual agent interactions. The `input_text` field can only
// be sent once, and cancels any ongoing speech recognition.
string input_text = 6;
// The DTMF digits used to invoke an intent and fill in parameter values.
//
// This input is ignored if the previous response indicated that DTMF input
// is not accepted.
TelephonyDtmfEvents input_dtmf = 9;
// The intent to be triggered on the V3 agent.
// Format: `projects/<Project ID>/locations/<Location ID>/agents/<Agent
// ID>/intents/<Intent ID>`.
string input_intent = 17;
// The input event name.
// This can only be sent once, and cancels any ongoing speech recognition.
string input_event = 20;
}
// Parameters for a Dialogflow virtual-agent query.
QueryParameters query_params = 7;
// Parameters for a human assist query.
AssistQueryParameters assist_query_params = 8;
// Additional parameters to be put into Dialogflow CX session parameters. To
// remove a parameter from the session, clients should explicitly set the
// parameter value to null.
//
// Note: this field should only be used if you are connecting to a Dialogflow
// CX agent.
google.protobuf.Struct cx_parameters = 13;
// The unique identifier of the CX page to override the `current_page` in the
// session.
// Format: `projects/<Project ID>/locations/<Location ID>/agents/<Agent
// ID>/flows/<Flow ID>/pages/<Page ID>`.
//
// If `cx_current_page` is specified, the previous state of the session will
// be ignored by Dialogflow CX, including the [previous
// page][QueryResult.current_page] and the [previous session
// parameters][QueryResult.parameters]. In most cases, `cx_current_page` and
// `cx_parameters` should be configured together to direct a session to a
// specific state.
//
// Note: this field should only be used if you are connecting to a Dialogflow
// CX agent.
string cx_current_page = 15;
// Optional. Enable full bidirectional streaming. You can keep streaming the
// audio until timeout, and there's no need to half-close the stream to get
// the response.
//
// Restrictions:
//
// - Timeout: 3 mins.
// - Audio Encoding: only supports
// [AudioEncoding.AUDIO_ENCODING_LINEAR_16][google.cloud.dialogflow.v2beta1.AudioEncoding.AUDIO_ENCODING_LINEAR_16]
// and
// [AudioEncoding.AUDIO_ENCODING_MULAW][google.cloud.dialogflow.v2beta1.AudioEncoding.AUDIO_ENCODING_MULAW]
// - Lifecycle: the conversation should be in `Assist Stage`; see
// [Conversation.CreateConversation][] for more information.
//
// An `InvalidArgument` error will be returned if one of the restriction
// checks fails.
//
// You can find more details in
// https://cloud.google.com/agent-assist/docs/extended-streaming
bool enable_extended_streaming = 11 [(google.api.field_behavior) = OPTIONAL];
// Enable partial virtual agent responses. If this flag is not enabled, the
// response stream still contains only one final response even if some
// `Fulfillment`s in the Dialogflow virtual agent have been configured to
// return partial responses.
bool enable_partial_automated_agent_reply = 12;
// If true, `StreamingAnalyzeContentResponse.debugging_info` will be
// populated.
bool enable_debugging_info = 19;
}
// The top-level message returned from the `StreamingAnalyzeContent` method.
//
// Multiple response messages can be returned in order:
//
// 1. If the input was set to streaming audio, the first one or more messages
// contain `recognition_result`. Each `recognition_result` represents a more
// complete transcript of what the user said. The last `recognition_result`
// has `is_final` set to `true`.
//
// 2. In virtual agent stage: if `enable_partial_automated_agent_reply` is
// true, the following N (currently 1 <= N <= 4) messages
// contain `automated_agent_reply` and optionally `reply_audio`
// returned by the virtual agent. The first (N-1)
// `automated_agent_reply`s will have `automated_agent_reply_type` set to
// `PARTIAL`. The last `automated_agent_reply` has
// `automated_agent_reply_type` set to `FINAL`.
// If `enable_partial_automated_agent_reply` is not enabled, the response
// stream contains only the final reply.
//
// In human assist stage: the following N (N >= 1) messages contain
// `human_agent_suggestion_results`, `end_user_suggestion_results` or
// `message`.
message StreamingAnalyzeContentResponse {
// The result of speech recognition.
StreamingRecognitionResult recognition_result = 1;
// Optional. The output text content.
// This field is set if an automated agent responded with text to show to
// the user.
string reply_text = 2;
// Optional. The audio data bytes encoded as specified in the request.
// This field is set if:
//
// - The `reply_audio_config` field is specified in the request.
// - The automated agent, which this output comes from, responded with audio.
// In such case, the `reply_audio.config` field contains settings used to
// synthesize the speech.
//
// In some scenarios, multiple output audio fields may be present in the
// response structure. In these cases, only the top-most-level audio output
// has content.
OutputAudio reply_audio = 3;
// Optional. Only set if a Dialogflow automated agent has responded.
// Note that: [AutomatedAgentReply.detect_intent_response.output_audio][]
// and [AutomatedAgentReply.detect_intent_response.output_audio_config][]
// are always empty, use
// [reply_audio][google.cloud.dialogflow.v2beta1.StreamingAnalyzeContentResponse.reply_audio]
// instead.
AutomatedAgentReply automated_agent_reply = 4;
// Output only. Message analyzed by CCAI.
Message message = 6;
// The suggestions for the most recent human agent. The order is the same as
// [HumanAgentAssistantConfig.SuggestionConfig.feature_configs][google.cloud.dialogflow.v2beta1.HumanAgentAssistantConfig.SuggestionConfig.feature_configs]
// of
// [HumanAgentAssistantConfig.human_agent_suggestion_config][google.cloud.dialogflow.v2beta1.HumanAgentAssistantConfig.human_agent_suggestion_config].
repeated SuggestionResult human_agent_suggestion_results = 7;
// The suggestions for the end user. The order is the same as
// [HumanAgentAssistantConfig.SuggestionConfig.feature_configs][google.cloud.dialogflow.v2beta1.HumanAgentAssistantConfig.SuggestionConfig.feature_configs]
// of
// [HumanAgentAssistantConfig.end_user_suggestion_config][google.cloud.dialogflow.v2beta1.HumanAgentAssistantConfig.end_user_suggestion_config].
repeated SuggestionResult end_user_suggestion_results = 8;
// Indicates the parameters of DTMF.
DtmfParameters dtmf_parameters = 10;
// Debugging info that is populated when
// `StreamingAnalyzeContentRequest.enable_debugging_info` is set to true.
CloudConversationDebuggingInfo debugging_info = 11;
}
// Represents a part of a message possibly annotated with an entity. The part
// can be an entity or purely a part of the message between two entities or
// message start/end.
message AnnotatedMessagePart {
// Required. A part of a message possibly annotated with an entity.
string text = 1;
// Optional. The [Dialogflow system entity
// type](https://cloud.google.com/dialogflow/docs/reference/system-entities)
// of this message part. If this is empty, Dialogflow could not annotate the
// phrase part with a system entity.
string entity_type = 2;
// Optional. The [Dialogflow system entity formatted value
// ](https://cloud.google.com/dialogflow/docs/reference/system-entities) of
// this message part. For example for a system entity of type
// `@sys.unit-currency`, this may contain:
//
// {
// "amount": 5,
// "currency": "USD"
// }
//
google.protobuf.Value formatted_value = 3;
}
// Represents the result of annotation for the message.
message MessageAnnotation {
// Optional. The collection of annotated message parts ordered by their
// position in the message. You can recover the annotated message by
// concatenating [AnnotatedMessagePart.text].
repeated AnnotatedMessagePart parts = 1;
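// A minimal sketch of recovering the full message text in Java, assuming
// `annotation` is a MessageAnnotation:
//
//   StringBuilder text = new StringBuilder();
//   for (AnnotatedMessagePart part : annotation.getPartsList()) {
//     text.append(part.getText());
//   }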
// Required. Indicates whether the text message contains entities.
bool contain_entities = 2;
}
// Represents an article answer.
message ArticleAnswer {
// The article title.
string title = 1;
// The article URI.
string uri = 2;
// Output only. Article snippets.
repeated string snippets = 3;
// A map that contains metadata about the answer and the
// document from which it originates.
map<string, string> metadata = 5;
// The name of the answer record, in the format of
// "projects/<Project ID>/locations/<Location ID>/answerRecords/<Answer
// Record ID>"
string answer_record = 6;
}
// Represents an answer from "frequently asked questions".
message FaqAnswer {
// The piece of text from the `source` knowledge base document.
string answer = 1;
// The system's confidence score that this Knowledge answer is a good match
// for this conversational query, ranging from 0.0 (completely uncertain)
// to 1.0 (completely certain).
float confidence = 2;
// The corresponding FAQ question.
string question = 3;
// Indicates which Knowledge Document this answer was extracted
// from.
// Format: `projects/<Project ID>/locations/<Location ID>/agent/
// knowledgeBases/<Knowledge Base ID>/documents/<Document ID>`.
string source = 4;
// A map that contains metadata about the answer and the
// document from which it originates.
map<string, string> metadata = 5;
// The name of the answer record, in the format of
// "projects/<Project ID>/locations/<Location ID>/answerRecords/<Answer
// Record ID>"
string answer_record = 6;
}
// Represents a smart reply answer.
message SmartReplyAnswer {
// The content of the reply.
string reply = 1;
// Smart reply confidence.
// The system's confidence score that this reply is a good match for
// this conversation, as a value from 0.0 (completely uncertain) to 1.0
// (completely certain).
float confidence = 2;
// The name of the answer record, in the format of
// "projects/<Project ID>/locations/<Location ID>/answerRecords/<Answer
// Record ID>"
string answer_record = 3 [(google.api.resource_reference) = {
type: "dialogflow.googleapis.com/AnswerRecord"
}];
}
// Represents an intent suggestion.
message IntentSuggestion {
// The display name of the intent.
string display_name = 1;
// The name of the intent.
oneof intent {
// The unique identifier of this
// [intent][google.cloud.dialogflow.v2beta1.Intent]. Format:
// `projects/<Project ID>/locations/<Location ID>/agent/intents/<Intent ID>`.
string intent_v2 = 2;
}
// Human-readable description to help better understand an intent, such as
// its scope, content, or result. Maximum character limit: 140 characters.
string description = 5;
}
// Represents a Dialogflow assist answer.
message DialogflowAssistAnswer {
// Result from DetectIntent for one matched intent.
oneof result {
// Result from v2 agent.
QueryResult query_result = 1;
// An intent suggestion generated from conversation.
IntentSuggestion intent_suggestion = 5;
}
// The name of the answer record, in the format of
// "projects/<Project ID>/locations/<Location ID>/answerRecords/<Answer
// Record ID>"
string answer_record = 2;
}
// One suggestion response of a particular type, used in the
// response of
// [Participants.AnalyzeContent][google.cloud.dialogflow.v2beta1.Participants.AnalyzeContent]
// and
// [Participants.StreamingAnalyzeContent][google.cloud.dialogflow.v2beta1.Participants.StreamingAnalyzeContent],
// as well as
// [HumanAgentAssistantEvent][google.cloud.dialogflow.v2beta1.HumanAgentAssistantEvent].
message SuggestionResult {
// Different type of suggestion response.
oneof suggestion_response {
// Error status if the request failed.
google.rpc.Status error = 1;
// SuggestArticlesResponse if request is for ARTICLE_SUGGESTION.
SuggestArticlesResponse suggest_articles_response = 2;
// SuggestKnowledgeAssistResponse if request is for KNOWLEDGE_ASSIST.
SuggestKnowledgeAssistResponse suggest_knowledge_assist_response = 8;
// SuggestFaqAnswersResponse if request is for FAQ_ANSWER.
SuggestFaqAnswersResponse suggest_faq_answers_response = 3;
// SuggestSmartRepliesResponse if request is for SMART_REPLY.
SuggestSmartRepliesResponse suggest_smart_replies_response = 4;
// SuggestDialogflowAssistsResponse if request is for DIALOGFLOW_ASSIST.
SuggestDialogflowAssistsResponse suggest_dialogflow_assists_response = 5;
// SuggestDialogflowAssistsResponse if request is for ENTITY_EXTRACTION.
SuggestDialogflowAssistsResponse suggest_entity_extraction_response = 7;
}
}
// The request message for
// [Participants.SuggestArticles][google.cloud.dialogflow.v2beta1.Participants.SuggestArticles].
message SuggestArticlesRequest {
// Required. The name of the participant to fetch suggestions for.
// Format: `projects/<Project ID>/locations/<Location
// ID>/conversations/<Conversation ID>/participants/<Participant ID>`.
string parent = 1 [
(google.api.field_behavior) = REQUIRED,
(google.api.resource_reference) = {
type: "dialogflow.googleapis.com/Participant"
}
];
// Optional. The name of the latest conversation message to compile a
// suggestion for. If empty, it will be the latest message of the
// conversation.
//
// Format: `projects/<Project ID>/locations/<Location
// ID>/conversations/<Conversation ID>/messages/<Message ID>`.
string latest_message = 2 [
(google.api.field_behavior) = OPTIONAL,
(google.api.resource_reference) = {
type: "dialogflow.googleapis.com/Message"
}
];
// Optional. Max number of messages prior to and including
// [latest_message][google.cloud.dialogflow.v2beta1.SuggestArticlesRequest.latest_message]
// to use as context when compiling the suggestion. By default 20 and at
// most 50.
int32 context_size = 3 [(google.api.field_behavior) = OPTIONAL];
// Optional. Parameters for a human assist query.
AssistQueryParameters assist_query_params = 4
[(google.api.field_behavior) = OPTIONAL];
}
// The response message for
// [Participants.SuggestArticles][google.cloud.dialogflow.v2beta1.Participants.SuggestArticles].
message SuggestArticlesResponse {
// Output only. Articles ordered by score in descending order.
repeated ArticleAnswer article_answers = 1;
// The name of the latest conversation message used to compile the
// suggestion.
//
// Format: `projects/<Project ID>/locations/<Location
// ID>/conversations/<Conversation ID>/messages/<Message ID>`.
string latest_message = 2;
// Number of messages prior to and including
// [latest_message][google.cloud.dialogflow.v2beta1.SuggestArticlesResponse.latest_message]
// to compile the suggestion. It may be smaller than the
// [SuggestArticlesRequest.context_size][google.cloud.dialogflow.v2beta1.SuggestArticlesRequest.context_size]
// field in the request if there aren't that many messages in the
// conversation.
int32 context_size = 3;
}
// The request message for
// [Participants.SuggestFaqAnswers][google.cloud.dialogflow.v2beta1.Participants.SuggestFaqAnswers].
message SuggestFaqAnswersRequest {
// Required. The name of the participant to fetch suggestions for.
// Format: `projects/<Project ID>/locations/<Location
// ID>/conversations/<Conversation ID>/participants/<Participant ID>`.
string parent = 1 [
(google.api.field_behavior) = REQUIRED,
(google.api.resource_reference) = {
type: "dialogflow.googleapis.com/Participant"
}
];
// Optional. The name of the latest conversation message to compile a
// suggestion for. If empty, it will be the latest message of the
// conversation.
//
// Format: `projects/<Project ID>/locations/<Location
// ID>/conversations/<Conversation ID>/messages/<Message ID>`.
string latest_message = 2 [
(google.api.field_behavior) = OPTIONAL,
(google.api.resource_reference) = {
type: "dialogflow.googleapis.com/Message"
}
];
// Optional. Max number of messages prior to and including
// [latest_message] to use as context when compiling the
// suggestion. By default 20 and at most 50.
int32 context_size = 3 [(google.api.field_behavior) = OPTIONAL];
// Optional. Parameters for a human assist query.
AssistQueryParameters assist_query_params = 4
[(google.api.field_behavior) = OPTIONAL];
}
// The response message for
// [Participants.SuggestFaqAnswers][google.cloud.dialogflow.v2beta1.Participants.SuggestFaqAnswers].
message SuggestFaqAnswersResponse {
// Output only. Answers extracted from FAQ documents.
repeated FaqAnswer faq_answers = 1;
// The name of the latest conversation message used to compile the
// suggestion.
//
// Format: `projects/<Project ID>/locations/<Location
// ID>/conversations/<Conversation ID>/messages/<Message ID>`.
string latest_message = 2;
// Number of messages prior to and including
// [latest_message][google.cloud.dialogflow.v2beta1.SuggestFaqAnswersResponse.latest_message]
// to compile the suggestion. It may be smaller than the
// [SuggestFaqAnswersRequest.context_size][google.cloud.dialogflow.v2beta1.SuggestFaqAnswersRequest.context_size]
// field in the request if there aren't that many messages in the
// conversation.
int32 context_size = 3;
}
// The request message for
// [Participants.SuggestSmartReplies][google.cloud.dialogflow.v2beta1.Participants.SuggestSmartReplies].
message SuggestSmartRepliesRequest {
// Required. The name of the participant to fetch suggestions for.
// Format: `projects/<Project ID>/locations/<Location
// ID>/conversations/<Conversation ID>/participants/<Participant ID>`.
string parent = 1 [
(google.api.field_behavior) = REQUIRED,
(google.api.resource_reference) = {
type: "dialogflow.googleapis.com/Participant"
}
];
// The current natural language text segment to compile suggestions for. This
// provides a way for the user to get follow-up smart reply suggestions after
// a smart reply selection, without sending a text message.
TextInput current_text_input = 4;
// The name of the latest conversation message to compile a suggestion
// for. If empty, it will be the latest message of the conversation.
//
// Format: `projects/<Project ID>/locations/<Location
// ID>/conversations/<Conversation ID>/messages/<Message ID>`.
string latest_message = 2 [(google.api.resource_reference) = {
type: "dialogflow.googleapis.com/Message"
}];
// Optional. Max number of messages prior to and including
// [latest_message] to use as context when compiling the
// suggestion. By default 20 and at most 50.
int32 context_size = 3;
}
// The response message for
// [Participants.SuggestSmartReplies][google.cloud.dialogflow.v2beta1.Participants.SuggestSmartReplies].
message SuggestSmartRepliesResponse {
// Output only. Multiple reply options provided by smart reply service. The
// order is based on the rank of the model prediction.
// The maximum number of the returned replies is set in SmartReplyConfig.
repeated SmartReplyAnswer smart_reply_answers = 1;
// The name of the latest conversation message used to compile the
// suggestion.
//
// Format: `projects/<Project ID>/locations/<Location
// ID>/conversations/<Conversation ID>/messages/<Message ID>`.
string latest_message = 2 [(google.api.resource_reference) = {
type: "dialogflow.googleapis.com/Message"
}];
// Number of messages prior to and including
// [latest_message][google.cloud.dialogflow.v2beta1.SuggestSmartRepliesResponse.latest_message]
// to compile the suggestion. It may be smaller than the
// [SuggestSmartRepliesRequest.context_size][google.cloud.dialogflow.v2beta1.SuggestSmartRepliesRequest.context_size]
// field in the request if there aren't that many messages in the
// conversation.
int32 context_size = 3;
}
// The response message for
// [Participants.SuggestDialogflowAssists][google.cloud.dialogflow.v2beta1.Participants.SuggestDialogflowAssists].
message SuggestDialogflowAssistsResponse {
// Output only. Multiple reply options provided by Dialogflow assist
// service. The order is based on the rank of the model prediction.
repeated DialogflowAssistAnswer dialogflow_assist_answers = 1;
// The name of the latest conversation message used to suggest an answer.
//
// Format: `projects/<Project ID>/locations/<Location
// ID>/conversations/<Conversation ID>/messages/<Message ID>`.
string latest_message = 2;
// Number of messages prior to and including
// [latest_message][google.cloud.dialogflow.v2beta1.SuggestDialogflowAssistsResponse.latest_message]
// to compile the suggestion. It may be smaller than the
// [SuggestDialogflowAssistsRequest.context_size][google.cloud.dialogflow.v2beta1.SuggestDialogflowAssistsRequest.context_size]
// field in the request if there aren't that many messages in the
// conversation.
int32 context_size = 3;
}
// Represents a suggestion for a human agent.
message Suggestion {
option deprecated = true;
// Represents suggested article.
message Article {
// Output only. The article title.
string title = 1;
// Output only. The article URI.
string uri = 2;
// Output only. Article snippets.
repeated string snippets = 3;
// Output only. A map that contains metadata about the answer and the
// document from which it originates.
map<string, string> metadata = 5;
// Output only. The name of the answer record, in the format of
// "projects/<Project ID>/locations/<Location ID>/answerRecords/<Answer
// Record ID>"
string answer_record = 6;
}
// Represents suggested answer from "frequently asked questions".
message FaqAnswer {
// Output only. The piece of text from the `source` knowledge base document.
string answer = 1;
// The system's confidence score that this Knowledge answer is a good match
// for this conversational query, ranging from 0.0 (completely uncertain)
// to 1.0 (completely certain).
float confidence = 2;
// Output only. The corresponding FAQ question.
string question = 3;
// Output only. Indicates which Knowledge Document this answer was extracted
// from.
// Format: `projects/<Project ID>/locations/<Location ID>/agent/
// knowledgeBases/<Knowledge Base ID>/documents/<Document ID>`.
string source = 4;
// Output only. A map that contains metadata about the answer and the
// document from which it originates.
map<string, string> metadata = 5;
// Output only. The name of the answer record, in the format of
// "projects/<Project ID>/locations/<Location ID>/answerRecords/<Answer
// Record ID>"
string answer_record = 6;
}
// Output only. The name of this suggestion.
// Format:
// `projects/<Project ID>/locations/<Location ID>/conversations/<Conversation
// ID>/participants/*/suggestions/<Suggestion ID>`.
string name = 1;
// Output only. Articles ordered by score in descending order.
repeated Article articles = 2;
// Output only. Answers extracted from FAQ documents.
repeated FaqAnswer faq_answers = 4;
// Output only. The time the suggestion was created.
google.protobuf.Timestamp create_time = 5;
// Output only. Latest message used as context to compile this suggestion.
//
// Format: `projects/<Project ID>/locations/<Location
// ID>/conversations/<Conversation ID>/messages/<Message ID>`.
string latest_message = 7;
}
// The request message for
// [Participants.ListSuggestions][google.cloud.dialogflow.v2beta1.Participants.ListSuggestions].
message ListSuggestionsRequest {
option deprecated = true;
// Required. The name of the participant to fetch suggestions for.
// Format: `projects/<Project ID>/locations/<Location
// ID>/conversations/<Conversation ID>/participants/<Participant ID>`.
string parent = 1;
// Optional. The maximum number of items to return in a single page. The
// default value is 100; the maximum value is 1000.
int32 page_size = 2;
// Optional. The next_page_token value returned from a previous list request.
string page_token = 3;
// Optional. Filter on suggestions fields. Currently predicates on
// `create_time` and `create_time_epoch_microseconds` are supported.
// `create_time` only supports millisecond accuracy. E.g.,
// `create_time_epoch_microseconds > 1551790877964485` or
// `create_time > "2017-01-15T01:30:15.01Z"`
//
// For more information about filtering, see
// [API Filtering](https://aip.dev/160).
string filter = 4;
}
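// Illustrative sketch (hypothetical IDs): a ListSuggestionsRequest that
// pages through suggestions created after a given time, using the filter
// predicates described above, in textproto form:
//
//   parent: "projects/my-project/locations/global/conversations/conv-1/participants/part-1"
//   page_size: 100
//   filter: "create_time > \"2017-01-15T01:30:15.01Z\""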
// The response message for
// [Participants.ListSuggestions][google.cloud.dialogflow.v2beta1.Participants.ListSuggestions].
message ListSuggestionsResponse {
option deprecated = true;
// Required. The list of suggestions. There will be a maximum number of items
// returned based on the page_size field in the request. `suggestions` is
// sorted by `create_time` in descending order.
repeated Suggestion suggestions = 1;
// Optional. Token to retrieve the next page of results or empty if there are
// no more results in the list.
string next_page_token = 2;
}
// The request message for
// [Participants.CompileSuggestion][google.cloud.dialogflow.v2beta1.Participants.CompileSuggestion].
message CompileSuggestionRequest {
option deprecated = true;
// Required. The name of the participant to fetch a suggestion for.
// Format: `projects/<Project ID>/locations/<Location ID>/conversations/<Conversation ID>/participants/<Participant ID>`.
string parent = 1;
// Optional. The name of the latest conversation message to compile a
// suggestion for. If empty, it will be the latest message of the
// conversation.
//
// Format: `projects/<Project ID>/locations/<Location ID>/conversations/<Conversation ID>/messages/<Message ID>`.
string latest_message = 2;
// Optional. Max number of messages prior to and including
// [latest_message] to use as context when compiling the
// suggestion. If zero or negative, 20 is used.
int32 context_size = 3;
}
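// Illustrative sketch (hypothetical IDs): leaving `latest_message` empty
// targets the newest message in the conversation, and a non-positive
// `context_size` falls back to 20. In textproto form:
//
//   parent: "projects/my-project/locations/global/conversations/conv-1/participants/part-1"
//   context_size: 0  // interpreted as 20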
// The response message for
// [Participants.CompileSuggestion][google.cloud.dialogflow.v2beta1.Participants.CompileSuggestion].
message CompileSuggestionResponse {
option deprecated = true;
// The compiled suggestion.
Suggestion suggestion = 1;
// The name of the latest conversation message used to compile the
// suggestion.
//
// Format: `projects/<Project ID>/locations/<Location ID>/conversations/<Conversation ID>/messages/<Message ID>`.
string latest_message = 2;
// Number of messages prior to and including
// [latest_message][google.cloud.dialogflow.v2beta1.CompileSuggestionResponse.latest_message]
// to compile the suggestion. It may be smaller than the
// [CompileSuggestionRequest.context_size][google.cloud.dialogflow.v2beta1.CompileSuggestionRequest.context_size]
// field in the request if there aren't that many messages in the
// conversation.
int32 context_size = 3;
}
// Response messages from an automated agent.
message ResponseMessage {
// The text response message.
message Text {
// A collection of text responses.
repeated string text = 1;
}
// Indicates that the conversation should be handed off to a human agent.
//
// Dialogflow only uses this to determine which conversations were handed off
// to a human agent for measurement purposes. What else to do with this signal
// is up to you and your handoff procedures.
//
// You may set this, for example:
//
// * In the entry fulfillment of a CX Page if entering the page indicates
// something went extremely wrong in the conversation.
// * In a webhook response when you determine that the customer issue can only
// be handled by a human.
message LiveAgentHandoff {
// Custom metadata for your handoff procedure. Dialogflow doesn't impose
// any structure on this.
google.protobuf.Struct metadata = 1;
}
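// Illustrative sketch (hypothetical values): since `metadata` is an
// arbitrary Struct that Dialogflow does not interpret, a handoff routed by
// your own escalation system might carry, in textproto form:
//
//   live_agent_handoff {
//     metadata {
//       fields {
//         key: "escalation_reason"
//         value { string_value: "billing_dispute" }
//       }
//     }
//   }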
// Indicates that interaction with the Dialogflow agent has ended.
message EndInteraction {}
// Represents an audio message that is composed of both segments
// synthesized from the Dialogflow agent prompts and ones hosted externally
// at the specified URIs.
message MixedAudio {
// Represents one segment of audio.
message Segment {
// Content of the segment.
oneof content {
// Raw audio synthesized from the Dialogflow agent's response using
// the output config specified in the request.
bytes audio = 1;
// Client-specific URI that points to an audio clip accessible to the
// client.
string uri = 2;
}
// Whether the playback of this segment can be interrupted by the end
// user's speech and the client should then start the next Dialogflow
// request.
bool allow_playback_interruption = 3;
}
// Segments this audio response is composed of.
repeated Segment segments = 1;
}
// Represents the signal that tells the client to transfer the phone call
// connected to the agent to a third-party endpoint.
message TelephonyTransferCall {
// Endpoint to transfer the call to.
oneof endpoint {
// Transfer the call to a phone number
// in [E.164 format](https://en.wikipedia.org/wiki/E.164).
string phone_number = 1;
// Transfer the call to a SIP endpoint.
string sip_uri = 2;
}
}
// Required. The rich response message.
oneof message {
// Returns a text response.
Text text = 1;
// Returns a response containing a custom, platform-specific payload.
google.protobuf.Struct payload = 2;
// Hands off conversation to a live agent.
LiveAgentHandoff live_agent_handoff = 3;
// A signal that indicates the interaction with the Dialogflow agent has
// ended.
EndInteraction end_interaction = 4;
// An audio response message composed of both the synthesized Dialogflow
// agent responses and the audios hosted in places known to the client.
MixedAudio mixed_audio = 5;
// A signal that the client should transfer the phone call connected to
// this agent to a third-party endpoint.
TelephonyTransferCall telephony_transfer_call = 6;
}
}
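// Illustrative sketch (hypothetical values): because `message` is a oneof,
// each ResponseMessage carries exactly one variant. A mixed-audio prompt
// followed by a call transfer would therefore arrive as two messages, in
// textproto form:
//
//   # ResponseMessage 1
//   mixed_audio {
//     segments {
//       uri: "https://example.com/audio/greeting.wav"
//       allow_playback_interruption: true
//     }
//   }
//
//   # ResponseMessage 2
//   telephony_transfer_call { phone_number: "+14155550123" }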
// The request message for
// [Participants.SuggestKnowledgeAssist][google.cloud.dialogflow.v2beta1.Participants.SuggestKnowledgeAssist].
message SuggestKnowledgeAssistRequest {
// Required. The name of the participant to fetch suggestions for.
// Format: `projects/<Project ID>/locations/<Location ID>/conversations/<Conversation ID>/participants/<Participant ID>`.
string parent = 1 [
(google.api.field_behavior) = REQUIRED,
(google.api.resource_reference) = {
type: "dialogflow.googleapis.com/Participant"
}
];
// Optional. The name of the latest conversation message to compile
// suggestions for. If empty, it will be the latest message of the
// conversation. Format: `projects/<Project ID>/locations/<Location ID>/conversations/<Conversation ID>/messages/<Message ID>`.
string latest_message = 2 [
(google.api.field_behavior) = OPTIONAL,
(google.api.resource_reference) = {
type: "dialogflow.googleapis.com/Message"
}
];
// Optional. Max number of messages prior to and including
// [latest_message][google.cloud.dialogflow.v2beta1.SuggestKnowledgeAssistRequest.latest_message]
// to use as context when compiling the suggestion. The context size
// defaults to 100 and is capped at 100.
int32 context_size = 3 [(google.api.field_behavior) = OPTIONAL];
// Optional. The previously suggested query for the given conversation. This
// helps identify whether the next suggestion we generate is reasonably
// different from the previous one. This is useful to avoid similar
// suggestions within the conversation.
string previous_suggested_query = 4 [(google.api.field_behavior) = OPTIONAL];
}
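// Illustrative sketch (hypothetical IDs and query text): a request that
// reuses the previously suggested query so the service can avoid
// near-duplicate suggestions, in textproto form:
//
//   parent: "projects/my-project/locations/global/conversations/conv-1/participants/part-1"
//   context_size: 100
//   previous_suggested_query: "How do I reset my password?"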
// The response message for
// [Participants.SuggestKnowledgeAssist][google.cloud.dialogflow.v2beta1.Participants.SuggestKnowledgeAssist].
message SuggestKnowledgeAssistResponse {
// Output only. Knowledge Assist suggestion.
KnowledgeAssistAnswer knowledge_assist_answer = 1
[(google.api.field_behavior) = OUTPUT_ONLY];
// The name of the latest conversation message used to compile the
// suggestion.
// Format: `projects/<Project ID>/locations/<Location ID>/conversations/<Conversation ID>/messages/<Message ID>`.
string latest_message = 2;
// Number of messages prior to and including
// [latest_message][google.cloud.dialogflow.v2beta1.SuggestKnowledgeAssistResponse.latest_message]
// to compile the suggestion. It may be smaller than the
// [SuggestKnowledgeAssistRequest.context_size][google.cloud.dialogflow.v2beta1.SuggestKnowledgeAssistRequest.context_size]
// field in the request if there are fewer messages in the conversation.
int32 context_size = 3;
}
// Represents a Knowledge Assist answer.
message KnowledgeAssistAnswer {
// Represents a suggested query.
message SuggestedQuery {
// Suggested query text.
string query_text = 1;
}
// Represents an answer from Knowledge. Currently supports FAQ and Generative
// answers.
message KnowledgeAnswer {
// Details about source of FAQ answer.
message FaqSource {
// The corresponding FAQ question.
string question = 2;
}
// Details about source of Generative answer.
message GenerativeSource {
// Snippet Source for a Generative Prediction.
message Snippet {
// URI the data is sourced from.
string uri = 2;
// Text taken from that URI.
string text = 3;
// Title of the document.
string title = 4;
}
// All snippets used for this Generative Prediction, with their source URI
// and data.
repeated Snippet snippets = 1;
}
// The piece of text from the `source` that answers this suggested query.
string answer_text = 1;
// Source of result.
oneof source {
// Populated if the prediction came from FAQ.
FaqSource faq_source = 3;
// Populated if the prediction was Generative.
GenerativeSource generative_source = 4;
}
}
// The query suggested based on the context. A suggestion is made only if
// it is different from the previous suggestion.
SuggestedQuery suggested_query = 1;
// The answer generated for the suggested query. Whether or not an answer is
// generated depends on how confident we are about the generated query.
KnowledgeAnswer suggested_query_answer = 2;
// The name of the answer record.
// Format: `projects/<Project ID>/locations/<Location
// ID>/answerRecords/<Answer Record ID>`.
string answer_record = 3;
}
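// Illustrative sketch (hypothetical values): a generative Knowledge Assist
// answer, where the `source` oneof carries a GenerativeSource with its
// supporting snippet, in textproto form:
//
//   suggested_query { query_text: "How do I reset my password?" }
//   suggested_query_answer {
//     answer_text: "You can reset your password from the Settings page."
//     generative_source {
//       snippets {
//         uri: "https://example.com/kb/reset-password"
//         text: "Open Settings > Account > Reset password ..."
//         title: "How to reset your password"
//       }
//     }
//   }
//   answer_record: "projects/my-project/locations/global/answerRecords/rec-3"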