google.cloud.dialogflow.v2.participant.proto
PROTO library for proto-google-cloud-dialogflow-v2
// Copyright 2024 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package google.cloud.dialogflow.v2;
import "google/api/annotations.proto";
import "google/api/client.proto";
import "google/api/field_behavior.proto";
import "google/api/resource.proto";
import "google/cloud/dialogflow/v2/audio_config.proto";
import "google/cloud/dialogflow/v2/session.proto";
import "google/protobuf/field_mask.proto";
import "google/protobuf/struct.proto";
import "google/protobuf/timestamp.proto";
import "google/rpc/status.proto";
option cc_enable_arenas = true;
option csharp_namespace = "Google.Cloud.Dialogflow.V2";
option go_package = "cloud.google.com/go/dialogflow/apiv2/dialogflowpb;dialogflowpb";
option java_multiple_files = true;
option java_outer_classname = "ParticipantProto";
option java_package = "com.google.cloud.dialogflow.v2";
option objc_class_prefix = "DF";
// Service for managing [Participants][google.cloud.dialogflow.v2.Participant].
service Participants {
option (google.api.default_host) = "dialogflow.googleapis.com";
option (google.api.oauth_scopes) =
"https://www.googleapis.com/auth/cloud-platform,"
"https://www.googleapis.com/auth/dialogflow";
// Creates a new participant in a conversation.
rpc CreateParticipant(CreateParticipantRequest) returns (Participant) {
option (google.api.http) = {
post: "/v2/{parent=projects/*/conversations/*}/participants"
body: "participant"
additional_bindings {
post: "/v2/{parent=projects/*/locations/*/conversations/*}/participants"
body: "participant"
}
};
option (google.api.method_signature) = "parent,participant";
}
// Retrieves a conversation participant.
rpc GetParticipant(GetParticipantRequest) returns (Participant) {
option (google.api.http) = {
get: "/v2/{name=projects/*/conversations/*/participants/*}"
additional_bindings {
get: "/v2/{name=projects/*/locations/*/conversations/*/participants/*}"
}
};
option (google.api.method_signature) = "name";
}
// Returns the list of all participants in the specified conversation.
rpc ListParticipants(ListParticipantsRequest)
returns (ListParticipantsResponse) {
option (google.api.http) = {
get: "/v2/{parent=projects/*/conversations/*}/participants"
additional_bindings {
get: "/v2/{parent=projects/*/locations/*/conversations/*}/participants"
}
};
option (google.api.method_signature) = "parent";
}
// Updates the specified participant.
rpc UpdateParticipant(UpdateParticipantRequest) returns (Participant) {
option (google.api.http) = {
patch: "/v2/{participant.name=projects/*/conversations/*/participants/*}"
body: "participant"
additional_bindings {
patch: "/v2/{participant.name=projects/*/locations/*/conversations/*/participants/*}"
body: "participant"
}
};
option (google.api.method_signature) = "participant,update_mask";
}
// Adds a text (for example, chat) or audio (for example, a phone recording)
// message from a participant into the conversation.
//
// Note: Always use agent versions for production traffic
// sent to virtual agents. See [Versions and
// environments](https://cloud.google.com/dialogflow/es/docs/agents-versions).
rpc AnalyzeContent(AnalyzeContentRequest) returns (AnalyzeContentResponse) {
option (google.api.http) = {
post: "/v2/{participant=projects/*/conversations/*/participants/*}:analyzeContent"
body: "*"
additional_bindings {
post: "/v2/{participant=projects/*/locations/*/conversations/*/participants/*}:analyzeContent"
body: "*"
}
};
option (google.api.method_signature) = "participant,text_input";
option (google.api.method_signature) = "participant,event_input";
}
// Adds a text (for example, chat) or audio (for example, a phone recording)
// message from a participant into the conversation.
// Note: This method is only available through the gRPC API (not REST).
//
// The top-level message sent to the client by the server is
// `StreamingAnalyzeContentResponse`. Multiple response messages can be
// returned in order. The first one or more messages contain the
// `recognition_result` field. Each result represents a more complete
// transcript of what the user said. The next message contains the
// `reply_text` field and potentially the `reply_audio` field. The message can
// also contain the `automated_agent_reply` field.
//
// Note: Always use agent versions for production traffic
// sent to virtual agents. See [Versions and
// environments](https://cloud.google.com/dialogflow/es/docs/agents-versions).
rpc StreamingAnalyzeContent(stream StreamingAnalyzeContentRequest)
returns (stream StreamingAnalyzeContentResponse) {}
// Gets suggested articles for a participant based on specific historical
// messages.
rpc SuggestArticles(SuggestArticlesRequest)
returns (SuggestArticlesResponse) {
option (google.api.http) = {
post: "/v2/{parent=projects/*/conversations/*/participants/*}/suggestions:suggestArticles"
body: "*"
additional_bindings {
post: "/v2/{parent=projects/*/locations/*/conversations/*/participants/*}/suggestions:suggestArticles"
body: "*"
}
};
option (google.api.method_signature) = "parent";
}
// Gets suggested faq answers for a participant based on specific historical
// messages.
rpc SuggestFaqAnswers(SuggestFaqAnswersRequest)
returns (SuggestFaqAnswersResponse) {
option (google.api.http) = {
post: "/v2/{parent=projects/*/conversations/*/participants/*}/suggestions:suggestFaqAnswers"
body: "*"
additional_bindings {
post: "/v2/{parent=projects/*/locations/*/conversations/*/participants/*}/suggestions:suggestFaqAnswers"
body: "*"
}
};
option (google.api.method_signature) = "parent";
}
// Gets smart replies for a participant based on specific historical
// messages.
rpc SuggestSmartReplies(SuggestSmartRepliesRequest)
returns (SuggestSmartRepliesResponse) {
option (google.api.http) = {
post: "/v2/{parent=projects/*/conversations/*/participants/*}/suggestions:suggestSmartReplies"
body: "*"
additional_bindings {
post: "/v2/{parent=projects/*/locations/*/conversations/*/participants/*}/suggestions:suggestSmartReplies"
body: "*"
}
};
option (google.api.method_signature) = "parent";
}
// Gets knowledge assist suggestions based on historical messages.
rpc SuggestKnowledgeAssist(SuggestKnowledgeAssistRequest)
returns (SuggestKnowledgeAssistResponse) {
option (google.api.http) = {
post: "/v2/{parent=projects/*/conversations/*/participants/*}/suggestions:suggestKnowledgeAssist"
body: "*"
additional_bindings {
post: "/v2/{parent=projects/*/locations/*/conversations/*/participants/*}/suggestions:suggestKnowledgeAssist"
body: "*"
}
};
}
}
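
// Illustrative note (not part of the upstream file): each `google.api.http`
// annotation above maps an RPC onto a REST path by expanding the braced
// variable. For example, assuming a participant named
// `projects/my-project/conversations/c-1/participants/p-1`, an
// `AnalyzeContent` call is exposed as:
//
//   POST /v2/projects/my-project/conversations/c-1/participants/p-1:analyzeContent
//
// with the remaining request fields (`body: "*"`) carried in the JSON body.
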
// Represents a conversation participant (human agent, virtual agent, end-user).
message Participant {
option (google.api.resource) = {
type: "dialogflow.googleapis.com/Participant"
pattern: "projects/{project}/conversations/{conversation}/participants/{participant}"
pattern: "projects/{project}/locations/{location}/conversations/{conversation}/participants/{participant}"
};
// Enumeration of the roles a participant can play in a conversation.
enum Role {
// Participant role not set.
ROLE_UNSPECIFIED = 0;
// Participant is a human agent.
HUMAN_AGENT = 1;
// Participant is an automated agent, such as a Dialogflow agent.
AUTOMATED_AGENT = 2;
// Participant is an end user that has called or chatted with
// Dialogflow services.
END_USER = 3;
}
// Optional. The unique identifier of this participant.
// Format: `projects/<Project ID>/locations/<Location ID>/conversations/<Conversation ID>/participants/<Participant ID>`.
string name = 1 [(google.api.field_behavior) = OPTIONAL];
// Immutable. The role this participant plays in the conversation. This field
// must be set during participant creation and is then immutable.
Role role = 2 [(google.api.field_behavior) = IMMUTABLE];
// Optional. Label applied to streams representing this participant in SIPREC
// XML metadata and SDP. This is used to assign transcriptions from that
// media stream to this participant. This field can be updated.
string sip_recording_media_label = 6 [(google.api.field_behavior) = OPTIONAL];
// Optional. Obfuscated user id that should be associated with the created
// participant.
//
// You can specify a user id as follows:
//
// 1. If you set this field in
// [CreateParticipantRequest][google.cloud.dialogflow.v2.CreateParticipantRequest.participant]
// or
// [UpdateParticipantRequest][google.cloud.dialogflow.v2.UpdateParticipantRequest.participant],
// Dialogflow adds the obfuscated user id with the participant.
//
// 2. If you set this field in
// [AnalyzeContent][google.cloud.dialogflow.v2.AnalyzeContentRequest.obfuscated_external_user_id]
// or
// [StreamingAnalyzeContent][google.cloud.dialogflow.v2.StreamingAnalyzeContentRequest.obfuscated_external_user_id],
// Dialogflow will update
// [Participant.obfuscated_external_user_id][google.cloud.dialogflow.v2.Participant.obfuscated_external_user_id].
//
// Dialogflow returns an error if you try to add a user id for a
// non-[END_USER][google.cloud.dialogflow.v2.Participant.Role.END_USER]
// participant.
//
// Dialogflow uses this user id for billing and measurement purposes. For
// example, Dialogflow determines whether a user in one conversation returned
// in a later conversation.
//
// Note:
//
// * Please never pass raw user ids to Dialogflow. Always obfuscate your user
// id first.
// * Dialogflow only accepts a UTF-8 encoded string, e.g., a hex digest of a
// hash function like SHA-512.
// * The length of the user id must be <= 256 characters.
string obfuscated_external_user_id = 7
[(google.api.field_behavior) = OPTIONAL];
// Optional. Key-value filters on the metadata of documents returned by
// article suggestion. If specified, article suggestion only returns suggested
// documents that match all filters in their
// [Document.metadata][google.cloud.dialogflow.v2.Document.metadata]. Multiple
// values for a metadata key should be comma-separated. For example, filters
// to match all documents that have 'US' or 'CA' in their market metadata
// values and 'agent' in their user metadata values will be:
// ```
// documents_metadata_filters {
// key: "market"
// value: "US,CA"
// }
// documents_metadata_filters {
// key: "user"
// value: "agent"
// }
// ```
map<string, string> documents_metadata_filters = 8
[(google.api.field_behavior) = OPTIONAL];
}
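
// Illustrative example (not part of the upstream file; all values are made
// up): a minimal `Participant` in textproto form, following the rules above.
// The id shown stands in for a properly obfuscated user id, e.g. a truncated
// hex digest of a hash of your internal user id.
//
//   name: "projects/my-project/conversations/c-1/participants/p-1"
//   role: END_USER
//   obfuscated_external_user_id: "9f86d081884c7d65..."
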
// Represents a message posted into a conversation.
message Message {
option (google.api.resource) = {
type: "dialogflow.googleapis.com/Message"
pattern: "projects/{project}/conversations/{conversation}/messages/{message}"
pattern: "projects/{project}/locations/{location}/conversations/{conversation}/messages/{message}"
};
// Optional. The unique identifier of the message.
// Format: `projects/<Project ID>/locations/<Location ID>/conversations/<Conversation ID>/messages/<Message ID>`.
string name = 1 [(google.api.field_behavior) = OPTIONAL];
// Required. The message content.
string content = 2 [(google.api.field_behavior) = REQUIRED];
// Optional. The message language.
// This should be a [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
// language tag. Example: "en-US".
string language_code = 3 [(google.api.field_behavior) = OPTIONAL];
// Output only. The participant that sends this message.
string participant = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. The role of the participant.
Participant.Role participant_role = 5
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. The time when the message was created in Contact Center AI.
google.protobuf.Timestamp create_time = 6
[(google.api.field_behavior) = OUTPUT_ONLY];
// Optional. The time when the message was sent.
google.protobuf.Timestamp send_time = 9
[(google.api.field_behavior) = OPTIONAL];
// Output only. The annotation for the message.
MessageAnnotation message_annotation = 7
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. The sentiment analysis result for the message.
SentimentAnalysisResult sentiment_analysis = 8
[(google.api.field_behavior) = OUTPUT_ONLY];
}
// The request message for
// [Participants.CreateParticipant][google.cloud.dialogflow.v2.Participants.CreateParticipant].
message CreateParticipantRequest {
// Required. Resource identifier of the conversation to add the participant
// to.
// Format: `projects/<Project ID>/locations/<Location ID>/conversations/<Conversation ID>`.
string parent = 1 [
(google.api.field_behavior) = REQUIRED,
(google.api.resource_reference) = {
child_type: "dialogflow.googleapis.com/Participant"
}
];
// Required. The participant to create.
Participant participant = 2 [(google.api.field_behavior) = REQUIRED];
}
// The request message for
// [Participants.GetParticipant][google.cloud.dialogflow.v2.Participants.GetParticipant].
message GetParticipantRequest {
// Required. The name of the participant. Format:
// `projects/<Project ID>/locations/<Location ID>/conversations/<Conversation ID>/participants/<Participant ID>`.
string name = 1 [
(google.api.field_behavior) = REQUIRED,
(google.api.resource_reference) = {
type: "dialogflow.googleapis.com/Participant"
}
];
}
// The request message for
// [Participants.ListParticipants][google.cloud.dialogflow.v2.Participants.ListParticipants].
message ListParticipantsRequest {
// Required. The conversation to list all participants from.
// Format: `projects/<Project ID>/locations/<Location ID>/conversations/<Conversation ID>`.
string parent = 1 [
(google.api.field_behavior) = REQUIRED,
(google.api.resource_reference) = {
child_type: "dialogflow.googleapis.com/Participant"
}
];
// Optional. The maximum number of items to return in a single page. By
// default 100 and at most 1000.
int32 page_size = 2 [(google.api.field_behavior) = OPTIONAL];
// Optional. The next_page_token value returned from a previous list request.
string page_token = 3 [(google.api.field_behavior) = OPTIONAL];
}
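
// Illustrative example (resource names made up): a textproto request for the
// second page of participants. The token is copied from `next_page_token` in
// the previous `ListParticipantsResponse`.
//
//   parent: "projects/my-project/conversations/c-1"
//   page_size: 100
//   page_token: "<next_page_token from the previous response>"
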
// The response message for
// [Participants.ListParticipants][google.cloud.dialogflow.v2.Participants.ListParticipants].
message ListParticipantsResponse {
// The list of participants. There is a maximum number of items
// returned based on the page_size field in the request.
repeated Participant participants = 1;
// Token to retrieve the next page of results or empty if there are no
// more results in the list.
string next_page_token = 2;
}
// The request message for
// [Participants.UpdateParticipant][google.cloud.dialogflow.v2.Participants.UpdateParticipant].
message UpdateParticipantRequest {
// Required. The participant to update.
Participant participant = 1 [(google.api.field_behavior) = REQUIRED];
// Required. The mask to specify which fields to update.
google.protobuf.FieldMask update_mask = 2
[(google.api.field_behavior) = REQUIRED];
}
// The request message for
// [Participants.AnalyzeContent][google.cloud.dialogflow.v2.Participants.AnalyzeContent].
message AnalyzeContentRequest {
// Required. The name of the participant this text comes from.
// Format: `projects/<Project ID>/locations/<Location ID>/conversations/<Conversation ID>/participants/<Participant ID>`.
string participant = 1 [
(google.api.field_behavior) = REQUIRED,
(google.api.resource_reference) = {
type: "dialogflow.googleapis.com/Participant"
}
];
// Required. The input content.
oneof input {
// The natural language text to be processed.
TextInput text_input = 6;
// An input event to send to Dialogflow.
EventInput event_input = 8;
// An input representing the selection of a suggestion.
SuggestionInput suggestion_input = 12;
}
// Speech synthesis configuration.
// The speech synthesis settings for a virtual agent that may be configured
// for the associated conversation profile are not used when calling
// AnalyzeContent. If this configuration is not supplied, speech synthesis
// is disabled.
OutputAudioConfig reply_audio_config = 5;
// Parameters for a Dialogflow virtual-agent query.
QueryParameters query_params = 9;
// Parameters for a human assist query.
AssistQueryParameters assist_query_params = 14;
// Additional parameters to be put into Dialogflow CX session parameters. To
// remove a parameter from the session, clients should explicitly set the
// parameter value to null.
//
// Note: this field should only be used if you are connecting to a Dialogflow
// CX agent.
google.protobuf.Struct cx_parameters = 18;
// A unique identifier for this request. Restricted to 36 ASCII characters.
// A random UUID is recommended.
// This request is only idempotent if a `request_id` is provided.
string request_id = 11;
}
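
// Illustrative example (resource names and values made up): a text
// `AnalyzeContentRequest` in textproto form. `cx_parameters` sets a parameter
// to null to remove it from the CX session, as documented above, and
// `request_id` carries a random UUID so the request is idempotent.
//
//   participant: "projects/my-project/conversations/c-1/participants/p-1"
//   text_input {
//     text: "I'd like to check my order status"
//     language_code: "en-US"
//   }
//   cx_parameters {
//     fields {
//       key: "order_id"
//       value { null_value: NULL_VALUE }  # removes `order_id` from the session
//     }
//   }
//   request_id: "123e4567-e89b-12d3-a456-426614174000"
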
// The message in the response that indicates the parameters of DTMF.
message DtmfParameters {
// Indicates whether DTMF input can be handled in the next request.
bool accepts_dtmf_input = 1;
}
// The response message for
// [Participants.AnalyzeContent][google.cloud.dialogflow.v2.Participants.AnalyzeContent].
message AnalyzeContentResponse {
// The output text content.
// This field is set if the automated agent responded with text to show to
// the user.
string reply_text = 1;
// The audio data bytes encoded as specified in the request.
// This field is set if:
//
// - `reply_audio_config` was specified in the request, or
// - The automated agent responded with audio to play to the user. In such
// case, `reply_audio.config` contains settings used to synthesize the
// speech.
//
// In some scenarios, multiple output audio fields may be present in the
// response structure. In these cases, only the top-most-level audio output
// has content.
OutputAudio reply_audio = 2;
// Only set if a Dialogflow automated agent has responded.
// Note that: [AutomatedAgentReply.detect_intent_response.output_audio][]
// and [AutomatedAgentReply.detect_intent_response.output_audio_config][]
// are always empty; use
// [reply_audio][google.cloud.dialogflow.v2.AnalyzeContentResponse.reply_audio]
// instead.
AutomatedAgentReply automated_agent_reply = 3;
// Message analyzed by CCAI.
Message message = 5;
// The suggestions for the most recent human agent. The order is the same as
// [HumanAgentAssistantConfig.SuggestionConfig.feature_configs][google.cloud.dialogflow.v2.HumanAgentAssistantConfig.SuggestionConfig.feature_configs]
// of
// [HumanAgentAssistantConfig.human_agent_suggestion_config][google.cloud.dialogflow.v2.HumanAgentAssistantConfig.human_agent_suggestion_config].
//
// Note that any failure of Agent Assist features will not lead to the overall
// failure of an AnalyzeContent API call. Instead, the features will
// fail silently with the error field set in the corresponding
// SuggestionResult.
repeated SuggestionResult human_agent_suggestion_results = 6;
// The suggestions for the end user. The order is the same as
// [HumanAgentAssistantConfig.SuggestionConfig.feature_configs][google.cloud.dialogflow.v2.HumanAgentAssistantConfig.SuggestionConfig.feature_configs]
// of
// [HumanAgentAssistantConfig.end_user_suggestion_config][google.cloud.dialogflow.v2.HumanAgentAssistantConfig.end_user_suggestion_config].
//
// As with human_agent_suggestion_results, any failure of Agent Assist
// features will not lead to the overall failure of an AnalyzeContent API
// call. Instead, the features will fail silently with the error field set in
// the corresponding SuggestionResult.
repeated SuggestionResult end_user_suggestion_results = 7;
// Indicates the parameters of DTMF.
DtmfParameters dtmf_parameters = 9;
}
// The top-level message sent by the client to the
// [Participants.StreamingAnalyzeContent][google.cloud.dialogflow.v2.Participants.StreamingAnalyzeContent]
// method.
//
// Multiple request messages should be sent in order:
//
// 1. The first message must contain
// [participant][google.cloud.dialogflow.v2.StreamingAnalyzeContentRequest.participant],
// [config][google.cloud.dialogflow.v2.StreamingAnalyzeContentRequest.config]
// and optionally
// [query_params][google.cloud.dialogflow.v2.StreamingAnalyzeContentRequest.query_params].
// If you want to receive an audio response, it should also contain
// [reply_audio_config][google.cloud.dialogflow.v2.StreamingAnalyzeContentRequest.reply_audio_config].
// The message must not contain
// [input][google.cloud.dialogflow.v2.StreamingAnalyzeContentRequest.input].
//
// 2. If
// [config][google.cloud.dialogflow.v2.StreamingAnalyzeContentRequest.config] in
// the first message
// was set to
// [audio_config][google.cloud.dialogflow.v2.StreamingAnalyzeContentRequest.audio_config],
// all subsequent messages must contain
// [input_audio][google.cloud.dialogflow.v2.StreamingAnalyzeContentRequest.input_audio]
// to continue with Speech recognition. If you want to switch to analyzing
// text input after Speech recognition has started, send a message with
// [input_text][google.cloud.dialogflow.v2.StreamingAnalyzeContentRequest.input_text].
// However, note that:
//
// * Dialogflow will bill you for the audio so far.
// * Dialogflow discards all Speech recognition results in favor of the
// text input.
//
// 3. If
// [StreamingAnalyzeContentRequest.config][google.cloud.dialogflow.v2.StreamingAnalyzeContentRequest.config]
// in the first message was set
// to
// [StreamingAnalyzeContentRequest.text_config][google.cloud.dialogflow.v2.StreamingAnalyzeContentRequest.text_config],
// then the second message must contain only
// [input_text][google.cloud.dialogflow.v2.StreamingAnalyzeContentRequest.input_text].
// Moreover, you must not send more than two messages.
//
// After you have sent all input, you must half-close or abort the request
// stream.
message StreamingAnalyzeContentRequest {
// Required. The name of the participant this text comes from.
// Format: `projects/<Project ID>/locations/<Location ID>/conversations/<Conversation ID>/participants/<Participant ID>`.
string participant = 1 [
(google.api.field_behavior) = REQUIRED,
(google.api.resource_reference) = {
type: "dialogflow.googleapis.com/Participant"
}
];
// The input config.
oneof config {
// Instructs the speech recognizer how to process the speech audio.
InputAudioConfig audio_config = 2;
// The natural language text to be processed.
InputTextConfig text_config = 3;
}
// Speech synthesis configuration.
// The speech synthesis settings for a virtual agent that may be configured
// for the associated conversation profile are not used when calling
// StreamingAnalyzeContent. If this configuration is not supplied, speech
// synthesis is disabled.
OutputAudioConfig reply_audio_config = 4;
// The input.
oneof input {
// The input audio content to be recognized. Must be sent if `audio_config`
// is set in the first message. The complete audio over all streaming
// messages must not exceed 1 minute.
bytes input_audio = 5;
// The UTF-8 encoded natural language text to be processed. Must be sent if
// `text_config` is set in the first message. Text length must not exceed
// 256 bytes for virtual agent interactions. The `input_text` field can be
// sent only once; it cancels any ongoing speech recognition.
string input_text = 6;
// The DTMF digits used to invoke an intent and fill in parameter values.
//
// This input is ignored if the previous response indicated that DTMF input
// is not accepted.
TelephonyDtmfEvents input_dtmf = 9;
}
// Parameters for a Dialogflow virtual-agent query.
QueryParameters query_params = 7;
// Parameters for a human assist query.
AssistQueryParameters assist_query_params = 8;
// Additional parameters to be put into Dialogflow CX session parameters. To
// remove a parameter from the session, clients should explicitly set the
// parameter value to null.
//
// Note: this field should only be used if you are connecting to a Dialogflow
// CX agent.
google.protobuf.Struct cx_parameters = 13;
// Optional. Enable full bidirectional streaming. You can keep streaming the
// audio until the timeout, and there is no need to half-close the stream to
// get the response.
//
// Restrictions:
//
// - Timeout: 3 mins.
// - Audio Encoding: only supports
// [AudioEncoding.AUDIO_ENCODING_LINEAR_16][google.cloud.dialogflow.v2.AudioEncoding.AUDIO_ENCODING_LINEAR_16]
// and
// [AudioEncoding.AUDIO_ENCODING_MULAW][google.cloud.dialogflow.v2.AudioEncoding.AUDIO_ENCODING_MULAW]
// - Lifecycle: the conversation should be in `Assist Stage`; see
// [Conversation.CreateConversation][] for more information.
//
// An InvalidArgument error will be returned if one of the restriction checks
// fails.
//
// You can find more details in
// https://cloud.google.com/agent-assist/docs/extended-streaming
bool enable_extended_streaming = 11 [(google.api.field_behavior) = OPTIONAL];
// Enable partial virtual agent responses. If this flag is not enabled, the
// response stream still contains only one final response even if some
// `Fulfillment`s in the Dialogflow virtual agent have been configured to
// return partial responses.
bool enable_partial_automated_agent_reply = 12;
// If true, `StreamingAnalyzeContentResponse.debugging_info` will get
// populated.
bool enable_debugging_info = 19;
}
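
// Illustrative example (resource names and audio settings made up; the
// `audio_config` fields come from `audio_config.proto`): the message sequence
// for an audio interaction, as textproto, following the ordering rules
// documented above.
//
//   # 1. First message: participant and config only, no input.
//   participant: "projects/my-project/conversations/c-1/participants/p-1"
//   audio_config {
//     audio_encoding: AUDIO_ENCODING_LINEAR_16
//     sample_rate_hertz: 16000
//     language_code: "en-US"
//   }
//
//   # 2..N. Subsequent messages: audio chunks only (at most 1 minute total).
//   input_audio: "<chunk of LINEAR16 audio bytes>"
//
// After the last chunk, half-close the stream (unless
// `enable_extended_streaming` is set) and read the responses.
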
// The top-level message returned from the `StreamingAnalyzeContent` method.
//
// Multiple response messages can be returned in order:
//
// 1. If the input was set to streaming audio, the first one or more messages
// contain `recognition_result`. Each `recognition_result` represents a more
// complete transcript of what the user said. The last `recognition_result`
// has `is_final` set to `true`.
//
// 2. In the virtual agent stage: if `enable_partial_automated_agent_reply` is
// true, the following N (currently 1 <= N <= 4) messages
// contain `automated_agent_reply` and optionally `reply_audio`
// returned by the virtual agent. The first (N-1)
// `automated_agent_reply`s will have `automated_agent_reply_type` set to
// `PARTIAL`. The last `automated_agent_reply` has
// `automated_agent_reply_type` set to `FINAL`.
// If `enable_partial_automated_agent_reply` is not enabled, the response
// stream contains only the final reply.
//
// In the human assist stage: the following N (N >= 1) messages contain
// `human_agent_suggestion_results`, `end_user_suggestion_results` or
// `message`.
message StreamingAnalyzeContentResponse {
// The result of speech recognition.
StreamingRecognitionResult recognition_result = 1;
// The output text content.
// This field is set if the automated agent responded with text to show to
// the user.
string reply_text = 2;
// The audio data bytes encoded as specified in the request.
// This field is set if:
//
// - The `reply_audio_config` field is specified in the request.
// - The automated agent, which this output comes from, responded with audio.
// In such case, the `reply_audio.config` field contains settings used to
// synthesize the speech.
//
// In some scenarios, multiple output audio fields may be present in the
// response structure. In these cases, only the top-most-level audio output
// has content.
OutputAudio reply_audio = 3;
// Only set if a Dialogflow automated agent has responded.
// Note that: [AutomatedAgentReply.detect_intent_response.output_audio][]
// and [AutomatedAgentReply.detect_intent_response.output_audio_config][]
// are always empty; use
// [reply_audio][google.cloud.dialogflow.v2.StreamingAnalyzeContentResponse.reply_audio]
// instead.
AutomatedAgentReply automated_agent_reply = 4;
// Message analyzed by CCAI.
Message message = 6;
// The suggestions for the most recent human agent. The order is the same as
// [HumanAgentAssistantConfig.SuggestionConfig.feature_configs][google.cloud.dialogflow.v2.HumanAgentAssistantConfig.SuggestionConfig.feature_configs]
// of
// [HumanAgentAssistantConfig.human_agent_suggestion_config][google.cloud.dialogflow.v2.HumanAgentAssistantConfig.human_agent_suggestion_config].
repeated SuggestionResult human_agent_suggestion_results = 7;
// The suggestions for the end user. The order is the same as
// [HumanAgentAssistantConfig.SuggestionConfig.feature_configs][google.cloud.dialogflow.v2.HumanAgentAssistantConfig.SuggestionConfig.feature_configs]
// of
// [HumanAgentAssistantConfig.end_user_suggestion_config][google.cloud.dialogflow.v2.HumanAgentAssistantConfig.end_user_suggestion_config].
repeated SuggestionResult end_user_suggestion_results = 8;
// Indicates the parameters of DTMF.
DtmfParameters dtmf_parameters = 10;
// Debugging info that would get populated when
// `StreamingAnalyzeContentRequest.enable_debugging_info` is set to true.
CloudConversationDebuggingInfo debugging_info = 11;
}
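
// Illustrative example (transcripts made up; `recognition_result` fields come
// from `session.proto`): one possible order of responses for a streaming
// audio request, matching the description above. Interim transcripts arrive
// first; the final one has `is_final: true`, and the reply follows.
//
//   recognition_result { message_type: TRANSCRIPT transcript: "I want" }
//   recognition_result { message_type: TRANSCRIPT transcript: "I want to pay my bill" is_final: true }
//   reply_text: "Sure, I can help you pay your bill."
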
// The request message for
// [Participants.SuggestArticles][google.cloud.dialogflow.v2.Participants.SuggestArticles].
message SuggestArticlesRequest {
// Required. The name of the participant to fetch suggestion for.
// Format: `projects/<Project ID>/locations/<Location ID>/conversations/<Conversation ID>/participants/<Participant ID>`.
string parent = 1 [
(google.api.field_behavior) = REQUIRED,
(google.api.resource_reference) = {
type: "dialogflow.googleapis.com/Participant"
}
];
// Optional. The name of the latest conversation message to compile suggestion
// for. If empty, it will be the latest message of the conversation.
//
// Format: `projects/<Project ID>/locations/<Location ID>/conversations/<Conversation ID>/messages/<Message ID>`.
string latest_message = 2 [
(google.api.field_behavior) = OPTIONAL,
(google.api.resource_reference) = {
type: "dialogflow.googleapis.com/Message"
}
];
// Optional. Max number of messages prior to and including
// [latest_message][google.cloud.dialogflow.v2.SuggestArticlesRequest.latest_message]
// to use as context when compiling the suggestion. By default 20 and at
// most 50.
int32 context_size = 3 [(google.api.field_behavior) = OPTIONAL];
// Parameters for a human assist query.
AssistQueryParameters assist_query_params = 4;
}
// The response message for
// [Participants.SuggestArticles][google.cloud.dialogflow.v2.Participants.SuggestArticles].
message SuggestArticlesResponse {
// Articles ordered by score in descending order.
repeated ArticleAnswer article_answers = 1;
// The name of the latest conversation message used to compile the
// suggestion.
//
// Format: `projects/<Project ID>/locations/<Location ID>/conversations/<Conversation ID>/messages/<Message ID>`.
string latest_message = 2;
// Number of messages prior to and including
// [latest_message][google.cloud.dialogflow.v2.SuggestArticlesResponse.latest_message]
// to compile the suggestion. It may be smaller than the
// [SuggestArticlesRequest.context_size][google.cloud.dialogflow.v2.SuggestArticlesRequest.context_size]
// field in the request if there aren't that many messages in the
// conversation.
int32 context_size = 3;
}
// The request message for
// [Participants.SuggestFaqAnswers][google.cloud.dialogflow.v2.Participants.SuggestFaqAnswers].
message SuggestFaqAnswersRequest {
// Required. The name of the participant to fetch suggestion for.
// Format: `projects/<Project ID>/locations/<Location ID>/conversations/<Conversation ID>/participants/<Participant ID>`.
string parent = 1 [
(google.api.field_behavior) = REQUIRED,
(google.api.resource_reference) = {
type: "dialogflow.googleapis.com/Participant"
}
];
// Optional. The name of the latest conversation message to compile suggestion
// for. If empty, it will be the latest message of the conversation.
//
// Format: `projects/<Project ID>/locations/<Location ID>/conversations/<Conversation ID>/messages/<Message ID>`.
string latest_message = 2 [
(google.api.field_behavior) = OPTIONAL,
(google.api.resource_reference) = {
type: "dialogflow.googleapis.com/Message"
}
];
// Optional. Max number of messages prior to and including
// [latest_message] to use as context when compiling the
// suggestion. By default 20 and at most 50.
int32 context_size = 3 [(google.api.field_behavior) = OPTIONAL];
// Parameters for a human assist query.
AssistQueryParameters assist_query_params = 4;
}
// The response message for
// [Participants.SuggestFaqAnswers][google.cloud.dialogflow.v2.Participants.SuggestFaqAnswers].
message SuggestFaqAnswersResponse {
// Answers extracted from FAQ documents.
repeated FaqAnswer faq_answers = 1;
// The name of the latest conversation message used to compile the
// suggestion.
//
// Format: `projects/<Project ID>/locations/<Location ID>/conversations/<Conversation ID>/messages/<Message ID>`.
string latest_message = 2;
// Number of messages prior to and including
// [latest_message][google.cloud.dialogflow.v2.SuggestFaqAnswersResponse.latest_message]
// to compile the suggestion. It may be smaller than the
// [SuggestFaqAnswersRequest.context_size][google.cloud.dialogflow.v2.SuggestFaqAnswersRequest.context_size]
// field in the request if there aren't that many messages in the
// conversation.
int32 context_size = 3;
}
// The request message for
// [Participants.SuggestSmartReplies][google.cloud.dialogflow.v2.Participants.SuggestSmartReplies].
message SuggestSmartRepliesRequest {
// Required. The name of the participant to fetch suggestion for.
// Format: `projects/<Project ID>/locations/<Location ID>/conversations/<Conversation ID>/participants/<Participant ID>`.
string parent = 1 [
(google.api.field_behavior) = REQUIRED,
(google.api.resource_reference) = {
type: "dialogflow.googleapis.com/Participant"
}
];
// The current natural language text segment to compile suggestion
// for. This provides a way for the user to get follow-up smart reply
// suggestions after a smart reply selection, without sending a text message.
TextInput current_text_input = 4;
// The name of the latest conversation message to compile suggestion
// for. If empty, it will be the latest message of the conversation.
//
// Format: `projects/<Project ID>/locations/<Location ID>/conversations/<Conversation ID>/messages/<Message ID>`.
string latest_message = 2 [(google.api.resource_reference) = {
type: "dialogflow.googleapis.com/Message"
}];
// Max number of messages prior to and including
// [latest_message] to use as context when compiling the
// suggestion. By default 20 and at most 50.
int32 context_size = 3;
}
// The response message for
// [Participants.SuggestSmartReplies][google.cloud.dialogflow.v2.Participants.SuggestSmartReplies].
message SuggestSmartRepliesResponse {
// Output only. Multiple reply options provided by the smart reply service.
// The order is based on the rank of the model prediction.
// The maximum number of returned replies is set in SmartReplyConfig.
repeated SmartReplyAnswer smart_reply_answers = 1
[(google.api.field_behavior) = OUTPUT_ONLY];
// The name of the latest conversation message used to compile the
// suggestion.
//
// Format: `projects/<Project ID>/locations/<Location ID>/conversations/<Conversation ID>/messages/<Message ID>`.
string latest_message = 2 [(google.api.resource_reference) = {
type: "dialogflow.googleapis.com/Message"
}];
// Number of messages prior to and including
// [latest_message][google.cloud.dialogflow.v2.SuggestSmartRepliesResponse.latest_message]
// to compile the suggestion. It may be smaller than the
// [SuggestSmartRepliesRequest.context_size][google.cloud.dialogflow.v2.SuggestSmartRepliesRequest.context_size]
// field in the request if there aren't that many messages in the
// conversation.
int32 context_size = 3;
}
// Represents the natural language speech audio to be played to the end user.
message OutputAudio {
// Instructs the speech synthesizer how to generate the speech
// audio.
OutputAudioConfig config = 1;
// The natural language speech audio.
bytes audio = 2;
}
// Represents a response from an automated agent.
message AutomatedAgentReply {
// Represents different automated agent reply types.
enum AutomatedAgentReplyType {
// Not specified. This should never happen.
AUTOMATED_AGENT_REPLY_TYPE_UNSPECIFIED = 0;
// Partial reply. For example, aggregated responses in a `Fulfillment` that
// enables `return_partial_response` can be returned as a partial reply.
// WARNING: partial replies are not eligible for barge-in.
PARTIAL = 1;
// Final reply.
FINAL = 2;
}
// Response of the Dialogflow
// [Sessions.DetectIntent][google.cloud.dialogflow.v2.Sessions.DetectIntent]
// call.
DetectIntentResponse detect_intent_response = 1;
// AutomatedAgentReply type.
AutomatedAgentReplyType automated_agent_reply_type = 7;
// Indicates whether the partial automated agent reply is interruptible when
// a later reply message arrives. For example, if the agent specified some
// music as a partial response, it can be cancelled.
bool allow_cancellation = 8;
// The unique identifier of the current Dialogflow CX conversation page.
// Format: `projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/flows/<Flow ID>/pages/<Page ID>`.
string cx_current_page = 11;
}
// Represents an article answer.
message ArticleAnswer {
// The article title.
string title = 1;
// The article URI.
string uri = 2;
// Article snippets.
repeated string snippets = 3;
// Article match confidence.
// The system's confidence score that this article is a good match for this
// conversation, as a value from 0.0 (completely uncertain) to 1.0
// (completely certain).
float confidence = 4;
// A map that contains metadata about the answer and the
// document from which it originates.
map<string, string> metadata = 5;
// The name of the answer record, in the format of
// "projects/<Project ID>/locations/<Location ID>/answerRecords/<Answer Record ID>"
string answer_record = 6;
}
// Represents an answer from "frequently asked questions".
message FaqAnswer {
// The piece of text from the `source` knowledge base document.
string answer = 1;
// The system's confidence score that this Knowledge answer is a good match
// for this conversational query, ranging from 0.0 (completely uncertain)
// to 1.0 (completely certain).
float confidence = 2;
// The corresponding FAQ question.
string question = 3;
// Indicates which Knowledge Document this answer was extracted
// from.
// Format: `projects/<Project ID>/locations/<Location ID>/agent/knowledgeBases/<Knowledge Base ID>/documents/<Document ID>`.
string source = 4;
// A map that contains metadata about the answer and the
// document from which it originates.
map<string, string> metadata = 5;
// The name of the answer record, in the format of
// "projects/<Project ID>/locations/<Location ID>/answerRecords/<Answer Record ID>"
string answer_record = 6;
}
// Represents a smart reply answer.
message SmartReplyAnswer {
// The content of the reply.
string reply = 1;
// Smart reply confidence.
// The system's confidence score that this reply is a good match for
// this conversation, as a value from 0.0 (completely uncertain) to 1.0
// (completely certain).
float confidence = 2;
// The name of the answer record, in the format of
// "projects/<Project ID>/locations/<Location ID>/answerRecords/<Answer Record ID>"
string answer_record = 3 [(google.api.resource_reference) = {
type: "dialogflow.googleapis.com/AnswerRecord"
}];
}
// Represents an intent suggestion.
message IntentSuggestion {
// The display name of the intent.
string display_name = 1;
// The name of the intent.
oneof intent {
// The unique identifier of this
// [intent][google.cloud.dialogflow.v2.Intent]. Format:
// `projects/<Project ID>/locations/<Location ID>/agent/intents/<Intent ID>`.
string intent_v2 = 2;
}
// Human-readable description to help understand an intent, such as its
// scope, content, and result. Maximum character limit: 140 characters.
string description = 5;
}
// Represents a Dialogflow assist answer.
message DialogflowAssistAnswer {
// Result from DetectIntent for one matched intent.
oneof result {
// Result from v2 agent.
QueryResult query_result = 1;
// An intent suggestion generated from conversation.
IntentSuggestion intent_suggestion = 5;
}
// The name of the answer record, in the format of
// "projects/<Project ID>/locations/<Location ID>/answerRecords/<Answer Record ID>"
string answer_record = 2;
}
// A suggestion response of one of several types, used in the response of
// [Participants.AnalyzeContent][google.cloud.dialogflow.v2.Participants.AnalyzeContent]
// and
// [Participants.StreamingAnalyzeContent][google.cloud.dialogflow.v2.Participants.StreamingAnalyzeContent],
// as well as
// [HumanAgentAssistantEvent][google.cloud.dialogflow.v2.HumanAgentAssistantEvent].
message SuggestionResult {
// Different type of suggestion response.
oneof suggestion_response {
// Error status if the request failed.
google.rpc.Status error = 1;
// SuggestArticlesResponse if request is for ARTICLE_SUGGESTION.
SuggestArticlesResponse suggest_articles_response = 2;
// SuggestKnowledgeAssistResponse if request is for KNOWLEDGE_ASSIST.
SuggestKnowledgeAssistResponse suggest_knowledge_assist_response = 8;
// SuggestFaqAnswersResponse if request is for FAQ_ANSWER.
SuggestFaqAnswersResponse suggest_faq_answers_response = 3;
// SuggestSmartRepliesResponse if request is for SMART_REPLY.
SuggestSmartRepliesResponse suggest_smart_replies_response = 4;
}
}
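
// Illustrative example (contents made up): two `SuggestionResult`s as they
// might appear in `human_agent_suggestion_results`. The first carries an
// article suggestion; the second shows the silent-failure case described
// above, where only `error` is set (code 8 is RESOURCE_EXHAUSTED in
// google.rpc.Code).
//
//   suggest_articles_response {
//     article_answers {
//       title: "Resetting a password"
//       uri: "https://example.com/help/reset"
//       confidence: 0.8
//     }
//     context_size: 10
//   }
//
//   error { code: 8 message: "Quota exceeded" }
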
// Defines the language used in the input text.
message InputTextConfig {
// Required. The language of this conversational query. See [Language
// Support](https://cloud.google.com/dialogflow/docs/reference/language)
// for a list of the currently supported language codes.
string language_code = 1 [(google.api.field_behavior) = REQUIRED];
}
// Represents a part of a message possibly annotated with an entity. The part
// can be an entity or purely a part of the message between two entities or
// message start/end.
message AnnotatedMessagePart {
// A part of a message possibly annotated with an entity.
string text = 1;
// The [Dialogflow system entity
// type](https://cloud.google.com/dialogflow/docs/reference/system-entities)
// of this message part. If this is empty, Dialogflow could not annotate the
// phrase part with a system entity.
string entity_type = 2;
// The [Dialogflow system entity formatted value
// ](https://cloud.google.com/dialogflow/docs/reference/system-entities) of
// this message part. For example for a system entity of type
// `@sys.unit-currency`, this may contain:
//
// {
// "amount": 5,
// "currency": "USD"
// }
//
google.protobuf.Value formatted_value = 3;
}
// Represents the result of annotation for the message.
message MessageAnnotation {
// The collection of annotated message parts ordered by their
// position in the message. You can recover the annotated message by
// concatenating [AnnotatedMessagePart.text].
repeated AnnotatedMessagePart parts = 1;
// Indicates whether the text message contains entities.
bool contain_entities = 2;
}
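
// Illustrative example (text made up): a `MessageAnnotation` in textproto
// form. Concatenating the `text` of the parts recovers the original message;
// the annotated part also carries its system entity type and formatted value.
//
//   parts { text: "I want to fly to " }
//   parts {
//     text: "New York"
//     entity_type: "@sys.geo-city"
//     formatted_value { string_value: "New York" }
//   }
//   contain_entities: true
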
// Represents the selection of a suggestion.
message SuggestionInput {
// Required. The ID of a suggestion selected by the human agent.
// The suggestion(s) were generated in a previous call to
// request Dialogflow assist.
// The format is:
// `projects/<Project ID>/locations/<Location ID>/answerRecords/<Answer Record ID>`,
// where `<Answer Record ID>` is an alphanumeric string.
string answer_record = 1 [(google.api.field_behavior) = REQUIRED];
}
// Represents the parameters of human assist query.
message AssistQueryParameters {
// Key-value filters on the metadata of documents returned by article
// suggestion. If specified, article suggestion only returns suggested
// documents that match all filters in their
// [Document.metadata][google.cloud.dialogflow.v2.Document.metadata]. Multiple
// values for a metadata key should be comma-separated. For example, filters
// to match all documents that have 'US' or 'CA' in their market metadata
// values and 'agent' in their user metadata values will be:
// ```
// documents_metadata_filters {
// key: "market"
// value: "US,CA"
// }
// documents_metadata_filters {
// key: "user"
// value: "agent"
// }
// ```
map<string, string> documents_metadata_filters = 1;
}
// The request message for
// [Participants.SuggestKnowledgeAssist][google.cloud.dialogflow.v2.Participants.SuggestKnowledgeAssist].
message SuggestKnowledgeAssistRequest {
// Required. The name of the participant to fetch suggestions for.
// Format: `projects/<Project ID>/locations/<Location ID>/conversations/<Conversation ID>/participants/<Participant ID>`.
string parent = 1 [
(google.api.field_behavior) = REQUIRED,
(google.api.resource_reference) = {
type: "dialogflow.googleapis.com/Participant"
}
];
// Optional. The name of the latest conversation message to compile
// suggestions for. If empty, it will be the latest message of the
// conversation. Format:
// `projects/<Project ID>/locations/<Location ID>/conversations/<Conversation ID>/messages/<Message ID>`.
string latest_message = 2 [
(google.api.field_behavior) = OPTIONAL,
(google.api.resource_reference) = {
type: "dialogflow.googleapis.com/Message"
}
];
// Optional. Max number of messages prior to and including
// [latest_message][google.cloud.dialogflow.v2.SuggestKnowledgeAssistRequest.latest_message]
// to use as context when compiling the suggestion. The default and maximum
// context size is 100.
int32 context_size = 3 [(google.api.field_behavior) = OPTIONAL];
// Optional. The previously suggested query for the given conversation. This
// helps identify whether the next suggestion we generate is reasonably
// different from the previous one. This is useful to avoid similar
// suggestions within the conversation.
string previous_suggested_query = 4 [(google.api.field_behavior) = OPTIONAL];
}
// The response message for
// [Participants.SuggestKnowledgeAssist][google.cloud.dialogflow.v2.Participants.SuggestKnowledgeAssist].
message SuggestKnowledgeAssistResponse {
// Output only. Knowledge Assist suggestion.
KnowledgeAssistAnswer knowledge_assist_answer = 1
[(google.api.field_behavior) = OUTPUT_ONLY];
// The name of the latest conversation message used to compile the suggestion.
// Format: `projects/<Project ID>/locations/<Location ID>/conversations/<Conversation ID>/messages/<Message ID>`.
string latest_message = 2;
// Number of messages prior to and including
// [latest_message][google.cloud.dialogflow.v2.SuggestKnowledgeAssistResponse.latest_message]
// to compile the suggestion. It may be smaller than the
// [SuggestKnowledgeAssistRequest.context_size][google.cloud.dialogflow.v2.SuggestKnowledgeAssistRequest.context_size]
// field in the request if there are fewer messages in the conversation.
int32 context_size = 3;
}
// Represents a Knowledge Assist answer.
message KnowledgeAssistAnswer {
// Represents a suggested query.
message SuggestedQuery {
// Suggested query text.
string query_text = 1;
}
// Represents an answer from Knowledge. Currently supports FAQ and Generative
// answers.
message KnowledgeAnswer {
// Details about source of FAQ answer.
message FaqSource {
// The corresponding FAQ question.
string question = 2;
}
// Details about source of Generative answer.
message GenerativeSource {
// Snippet Source for a Generative Prediction.
message Snippet {
// URI the data is sourced from.
string uri = 2;
// Text taken from that URI.
string text = 3;
// Title of the document.
string title = 4;
}
// All snippets used for this Generative Prediction, with their source URI
// and data.
repeated Snippet snippets = 1;
}
// The piece of text from the `source` that answers this suggested query.
string answer_text = 1;
// Source of result.
oneof source {
// Populated if the prediction came from FAQ.
FaqSource faq_source = 3;
// Populated if the prediction was Generative.
GenerativeSource generative_source = 4;
}
}
// The query suggested based on the context. Suggestion is made only if it
// is different from the previous suggestion.
SuggestedQuery suggested_query = 1;
// The answer generated for the suggested query. Whether or not an answer is
// generated depends on how confident we are about the generated query.
KnowledgeAnswer suggested_query_answer = 2;
// The name of the answer record.
// Format:
// `projects/<Project ID>/locations/<Location ID>/answerRecords/<Answer Record ID>`.
string answer_record = 3;
}
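
// Illustrative example (contents made up): a `KnowledgeAssistAnswer` with a
// generative answer, as textproto, showing how the nested messages fit
// together.
//
//   suggested_query { query_text: "How do I reset my password?" }
//   suggested_query_answer {
//     answer_text: "Open Settings > Security and choose Reset password."
//     generative_source {
//       snippets {
//         uri: "https://example.com/help/reset-password"
//         text: "To reset your password, open Settings > Security..."
//         title: "Password help"
//       }
//     }
//   }
//   answer_record: "projects/my-project/locations/global/answerRecords/ar-1"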