google.cloud.contactcenterinsights.v1.resources.proto Maven / Gradle / Ivy
Go to download
Show more of this group Show more artifacts with this name
Show all versions of proto-google-cloud-contact-center-insights-v1 Show documentation
Proto library for google-cloud-contact-center-insights
// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package google.cloud.contactcenterinsights.v1;
import "google/api/field_behavior.proto";
import "google/api/resource.proto";
import "google/protobuf/duration.proto";
import "google/protobuf/timestamp.proto";
option csharp_namespace = "Google.Cloud.ContactCenterInsights.V1";
option go_package = "cloud.google.com/go/contactcenterinsights/apiv1/contactcenterinsightspb;contactcenterinsightspb";
option java_multiple_files = true;
option java_outer_classname = "ResourcesProto";
option java_package = "com.google.cloud.contactcenterinsights.v1";
option php_namespace = "Google\\Cloud\\ContactCenterInsights\\V1";
option ruby_package = "Google::Cloud::ContactCenterInsights::V1";
option (google.api.resource_definition) = {
type: "dialogflow.googleapis.com/ConversationProfile"
pattern: "projects/{project}/locations/{location}/conversationProfiles/{conversation_profile}"
};
option (google.api.resource_definition) = {
type: "dialogflow.googleapis.com/Participant"
pattern: "projects/{project}/conversations/{conversation}/participants/{participant}"
pattern: "projects/{project}/locations/{location}/conversations/{conversation}/participants/{participant}"
};
option (google.api.resource_definition) = {
type: "speech.googleapis.com/Recognizer"
pattern: "projects/{project}/locations/{location}/recognizers/{recognizer}"
};
// The conversation resource.
message Conversation {
  option (google.api.resource) = {
    type: "contactcenterinsights.googleapis.com/Conversation"
    pattern: "projects/{project}/locations/{location}/conversations/{conversation}"
  };

  // Call-specific metadata.
  message CallMetadata {
    // The audio channel that contains the customer.
    int32 customer_channel = 1;

    // The audio channel that contains the agent.
    int32 agent_channel = 2;
  }

  // A message representing the transcript of a conversation.
  message Transcript {
    // A segment of a full transcript.
    message TranscriptSegment {
      // Word-level info for words in a transcript.
      message WordInfo {
        // Time offset of the start of this word relative to the beginning of
        // the total conversation.
        google.protobuf.Duration start_offset = 1;

        // Time offset of the end of this word relative to the beginning of the
        // total conversation.
        google.protobuf.Duration end_offset = 2;

        // The word itself. Includes punctuation marks that surround the word.
        string word = 3;

        // A confidence estimate between 0.0 and 1.0 of the fidelity of this
        // word. A default value of 0.0 indicates that the value is unset.
        float confidence = 4;
      }

      // Metadata from Dialogflow relating to the current transcript segment.
      message DialogflowSegmentMetadata {
        // Whether the transcript segment was covered under the configured smart
        // reply allowlist in Agent Assist.
        bool smart_reply_allowlist_covered = 1;
      }

      // The time that the message occurred, if provided.
      google.protobuf.Timestamp message_time = 6;

      // The text of this segment.
      string text = 1;

      // A confidence estimate between 0.0 and 1.0 of the fidelity of this
      // segment. A default value of 0.0 indicates that the value is unset.
      float confidence = 2;

      // A list of the word-specific information for each word in the segment.
      repeated WordInfo words = 3;

      // The language code of this segment as a
      // [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
      // Example: "en-US".
      string language_code = 4;

      // For conversations derived from multi-channel audio, this is the channel
      // number corresponding to the audio from that channel. For
      // audioChannelCount = N, its output values can range from '1' to 'N'. A
      // channel tag of 0 indicates that the audio is mono.
      int32 channel_tag = 5;

      // The participant of this segment.
      ConversationParticipant segment_participant = 9;

      // CCAI metadata relating to the current transcript segment.
      DialogflowSegmentMetadata dialogflow_segment_metadata = 10;

      // The sentiment for this transcript segment.
      SentimentData sentiment = 11;
    }

    // A list of sequential transcript segments that comprise the conversation.
    repeated TranscriptSegment transcript_segments = 1;
  }

  // Possible media for the conversation.
  enum Medium {
    // Default value, if unspecified will default to PHONE_CALL.
    MEDIUM_UNSPECIFIED = 0;

    // The format for conversations that took place over the phone.
    PHONE_CALL = 1;

    // The format for conversations that took place over chat.
    CHAT = 2;
  }

  // Metadata that applies to the conversation.
  oneof metadata {
    // Call-specific metadata.
    CallMetadata call_metadata = 7;
  }

  // A time to live expiration setting, can be either a specified timestamp or a
  // duration from the time that the conversation creation request was received.
  // Conversations with an expiration set will be removed up to 24 hours after
  // the specified time.
  oneof expiration {
    // The time at which this conversation should expire. After this time, the
    // conversation data and any associated analyses will be deleted.
    google.protobuf.Timestamp expire_time = 15;

    // Input only. The TTL for this resource. If specified, then this TTL will
    // be used to calculate the expire time.
    google.protobuf.Duration ttl = 16
        [(google.api.field_behavior) = INPUT_ONLY];
  }

  // Immutable. The resource name of the conversation.
  // Format:
  // projects/{project}/locations/{location}/conversations/{conversation}
  string name = 1 [(google.api.field_behavior) = IMMUTABLE];

  // The source of the audio and transcription for the conversation.
  ConversationDataSource data_source = 2;

  // Output only. The time at which the conversation was created.
  google.protobuf.Timestamp create_time = 3
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. The most recent time at which the conversation was updated.
  google.protobuf.Timestamp update_time = 4
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // The time at which the conversation started.
  google.protobuf.Timestamp start_time = 17;

  // A user-specified language code for the conversation.
  string language_code = 14;

  // An opaque, user-specified string representing the human agent who handled
  // the conversation.
  string agent_id = 5;

  // A map for the user to specify any custom fields. A maximum of 20 labels per
  // conversation is allowed, with a maximum of 256 characters per entry.
  // NOTE: the scraped copy had dropped the map's type parameters; restored
  // here as a string-to-string map.
  map<string, string> labels = 6;

  // Output only. The conversation transcript.
  Transcript transcript = 8 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Immutable. The conversation medium, if unspecified will default to
  // PHONE_CALL.
  Medium medium = 9 [(google.api.field_behavior) = IMMUTABLE];

  // Output only. The duration of the conversation.
  google.protobuf.Duration duration = 10
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. The number of turns in the conversation.
  int32 turn_count = 11 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. The conversation's latest analysis, if one exists.
  Analysis latest_analysis = 12 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. Latest summary of the conversation.
  ConversationSummarizationSuggestionData latest_summary = 20
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. The annotations that were generated during the customer and
  // agent interaction.
  repeated RuntimeAnnotation runtime_annotations = 13
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. All the matched Dialogflow intents in the call. The key
  // corresponds to a Dialogflow intent, format:
  // projects/{project}/agent/{agent}/intents/{intent}
  map<string, DialogflowIntent> dialogflow_intents = 18
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Obfuscated user ID which the customer sent to us.
  string obfuscated_user_id = 21;
}
// The analysis resource. An analysis is a child resource of a conversation
// (see the resource pattern below) and holds the results produced for it.
message Analysis {
option (google.api.resource) = {
type: "contactcenterinsights.googleapis.com/Analysis"
pattern: "projects/{project}/locations/{location}/conversations/{conversation}/analyses/{analysis}"
};
// Immutable. The resource name of the analysis.
// Format:
// projects/{project}/locations/{location}/conversations/{conversation}/analyses/{analysis}
string name = 1 [(google.api.field_behavior) = IMMUTABLE];
// Output only. The time at which the analysis was requested.
google.protobuf.Timestamp request_time = 2
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. The time at which the analysis was created, which occurs when
// the long-running operation completes.
google.protobuf.Timestamp create_time = 3
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. The result of the analysis, which is populated when the
// analysis finishes.
AnalysisResult analysis_result = 7
[(google.api.field_behavior) = OUTPUT_ONLY];
// To select the annotators to run and the phrase matchers to use
// (if any). If not specified, all annotators will be run.
// NOTE(review): field numbers 4-6 are unused in this message — confirm they
// are reserved upstream before reuse.
AnnotatorSelector annotator_selector = 8;
}
// The conversation source, which is a combination of transcript and audio.
message ConversationDataSource {
// The source of the conversation.
// At most one of the fields below may be set (proto3 oneof semantics).
oneof source {
// A Cloud Storage location specification for the audio and transcript.
GcsSource gcs_source = 1;
// The source when the conversation comes from Dialogflow.
DialogflowSource dialogflow_source = 3;
}
}
// A Cloud Storage source of conversation data.
message GcsSource {
// Cloud Storage URI that points to a file that contains the conversation
// audio.
// Example form: "gs://{bucket}/{object}".
string audio_uri = 1;
// Immutable. Cloud Storage URI that points to a file that contains the
// conversation transcript.
string transcript_uri = 2 [(google.api.field_behavior) = IMMUTABLE];
}
// A Dialogflow source of conversation data.
message DialogflowSource {
// Output only. The name of the Dialogflow conversation that this conversation
// resource is derived from. Format:
// projects/{project}/locations/{location}/conversations/{conversation}
string dialogflow_conversation = 1
[(google.api.field_behavior) = OUTPUT_ONLY];
// Cloud Storage URI that points to a file that contains the conversation
// audio.
// NOTE(review): field number 2 is unused in this message — confirm it is
// reserved upstream before reuse.
string audio_uri = 3;
}
// The result of an analysis.
message AnalysisResult {
  // Call-specific metadata created during analysis.
  message CallAnalysisMetadata {
    // A list of call annotations that apply to this call.
    repeated CallAnnotation annotations = 2;

    // All the entities in the call.
    // NOTE: the scraped copy had dropped the map type parameters; restored
    // from the published schema.
    map<string, Entity> entities = 3;

    // Overall conversation-level sentiment for each channel of the call.
    repeated ConversationLevelSentiment sentiments = 4;

    // All the matched intents in the call.
    map<string, Intent> intents = 6;

    // All the matched phrase matchers in the call.
    map<string, PhraseMatchData> phrase_matchers = 7;

    // Overall conversation-level issue modeling result.
    IssueModelResult issue_model_result = 8;
  }

  // Metadata discovered during analysis.
  oneof metadata {
    // Call-specific metadata created by the analysis.
    CallAnalysisMetadata call_analysis_metadata = 2;
  }

  // The time at which the analysis ended.
  google.protobuf.Timestamp end_time = 1;
}
// Issue Modeling result on a conversation.
message IssueModelResult {
// Issue model that generates the result.
// Format: projects/{project}/locations/{location}/issueModels/{issue_model}
string issue_model = 1 [(google.api.resource_reference) = {
type: "contactcenterinsights.googleapis.com/IssueModel"
}];
// All the matched issues.
// Each entry carries the issue resource name, likelihood score, and display
// name (see `IssueAssignment`).
repeated IssueAssignment issues = 2;
}
// One channel of conversation-level sentiment data.
message ConversationLevelSentiment {
// The channel of the audio that the data applies to.
// NOTE(review): presumably follows the same numbering as
// `TranscriptSegment.channel_tag` — confirm.
int32 channel_tag = 1;
// Data specifying sentiment.
SentimentData sentiment_data = 2;
}
// Information about the issue.
message IssueAssignment {
// Resource name of the assigned issue.
string issue = 1;
// Score indicating the likelihood of the issue assignment.
// Currently bounded on [0, 1].
double score = 2;
// Immutable. Display name of the assigned issue. This field is set at time of
// analysis and immutable since then.
string display_name = 3 [(google.api.field_behavior) = IMMUTABLE];
}
// A piece of metadata that applies to a window of a call.
message CallAnnotation {
// The data in the annotation.
// Exactly one of the fields below is set for a given annotation.
oneof data {
// Data specifying an interruption.
InterruptionData interruption_data = 10;
// Data specifying sentiment.
SentimentData sentiment_data = 11;
// Data specifying silence.
SilenceData silence_data = 12;
// Data specifying a hold.
HoldData hold_data = 13;
// Data specifying an entity mention.
EntityMentionData entity_mention_data = 15;
// Data specifying an intent match.
IntentMatchData intent_match_data = 16;
// Data specifying a phrase match.
PhraseMatchData phrase_match_data = 17;
// Data specifying an issue match.
IssueMatchData issue_match_data = 18;
}
// The channel of the audio where the annotation occurs. For single-channel
// audio, this field is not populated.
int32 channel_tag = 1;
// The boundary in the conversation where the annotation starts, inclusive.
AnnotationBoundary annotation_start_boundary = 4;
// The boundary in the conversation where the annotation ends, inclusive.
AnnotationBoundary annotation_end_boundary = 5;
}
// A point in a conversation that marks the start or the end of an annotation.
message AnnotationBoundary {
// A detailed boundary, which describes a more specific point.
oneof detailed_boundary {
// The word index of this boundary with respect to the first word in the
// transcript piece. This index starts at zero.
int32 word_index = 3;
}
// The index in the sequence of transcribed pieces of the conversation where
// the boundary is located. This index starts at zero.
// When `word_index` is set, it is relative to the piece selected here.
int32 transcript_index = 1;
}
// The data for an entity annotation.
// Represents a phrase in the conversation that is a known entity, such
// as a person, an organization, or location.
message Entity {
  // The type of the entity. For most entity types, the associated metadata is a
  // Wikipedia URL (`wikipedia_url`) and Knowledge Graph MID (`mid`). The table
  // below lists the associated fields for entities that have different
  // metadata.
  enum Type {
    // Unspecified.
    TYPE_UNSPECIFIED = 0;

    // Person.
    PERSON = 1;

    // Location.
    LOCATION = 2;

    // Organization.
    ORGANIZATION = 3;

    // Event.
    EVENT = 4;

    // Artwork.
    WORK_OF_ART = 5;

    // Consumer product.
    CONSUMER_GOOD = 6;

    // Other types of entities.
    // NOTE(review): value 8 is unused in this enum — confirm it is reserved
    // upstream before reuse.
    OTHER = 7;

    // Phone number.
    //
    // The metadata lists the phone number (formatted according to local
    // convention), plus whichever additional elements appear in the text:
    //
    // * `number` - The actual number, broken down into sections according to
    // local convention.
    // * `national_prefix` - Country code, if detected.
    // * `area_code` - Region or area code, if detected.
    // * `extension` - Phone extension (to be dialed after connection), if
    // detected.
    PHONE_NUMBER = 9;

    // Address.
    //
    // The metadata identifies the street number and locality plus whichever
    // additional elements appear in the text:
    //
    // * `street_number` - Street number.
    // * `locality` - City or town.
    // * `street_name` - Street/route name, if detected.
    // * `postal_code` - Postal code, if detected.
    // * `country` - Country, if detected.
    // * `broad_region` - Administrative area, such as the state, if detected.
    // * `narrow_region` - Smaller administrative area, such as county, if
    // detected.
    // * `sublocality` - Used in Asian addresses to demark a district within a
    // city, if detected.
    ADDRESS = 10;

    // Date.
    //
    // The metadata identifies the components of the date:
    //
    // * `year` - Four digit year, if detected.
    // * `month` - Two digit month number, if detected.
    // * `day` - Two digit day number, if detected.
    DATE = 11;

    // Number.
    //
    // The metadata is the number itself.
    NUMBER = 12;

    // Price.
    //
    // The metadata identifies the `value` and `currency`.
    PRICE = 13;
  }

  // The representative name for the entity.
  string display_name = 1;

  // The entity type.
  Type type = 2;

  // Metadata associated with the entity.
  //
  // For most entity types, the metadata is a Wikipedia URL (`wikipedia_url`)
  // and Knowledge Graph MID (`mid`), if they are available. For the metadata
  // associated with other entity types, see the Type table below.
  // NOTE: the scraped copy had dropped the map type parameters; restored as a
  // string-to-string map.
  map<string, string> metadata = 3;

  // The salience score associated with the entity in the [0, 1.0] range.
  //
  // The salience score for an entity provides information about the
  // importance or centrality of that entity to the entire document text.
  // Scores closer to 0 are less salient, while scores closer to 1.0 are highly
  // salient.
  float salience = 4;

  // The aggregate sentiment expressed for this entity in the conversation.
  SentimentData sentiment = 5;
}
// The data for an intent.
// Represents a detected intent in the conversation, for example MAKES_PROMISE.
// See also `DialogflowIntent`, which represents a Dialogflow intent.
message Intent {
// The unique identifier of the intent.
string id = 1;
// The human-readable name of the intent.
string display_name = 2;
}
// The data for a matched phrase matcher.
// Represents information identifying a phrase matcher for a given match.
message PhraseMatchData {
// The unique identifier (the resource name) of the phrase matcher.
// Format:
// projects/{project}/locations/{location}/phraseMatchers/{phrase_matcher}
string phrase_matcher = 1;
// The human-readable name of the phrase matcher.
string display_name = 2;
}
// The data for a Dialogflow intent.
// Represents a detected intent in the conversation, e.g. MAKES_PROMISE.
// NOTE(review): presumably the value type of `Conversation.dialogflow_intents`
// — confirm against the published schema.
message DialogflowIntent {
// The human-readable name of the intent.
string display_name = 1;
}
// The data for an interruption annotation.
// This message currently contains no fields.
message InterruptionData {}
// The data for a silence annotation.
// This message currently contains no fields.
message SilenceData {}
// The data for a hold annotation.
// This message currently contains no fields.
message HoldData {}
// The data for an entity mention annotation.
// This represents a mention of an `Entity` in the conversation.
message EntityMentionData {
// The supported types of mentions.
enum MentionType {
// Unspecified. Zero value; never carries a real mention type.
MENTION_TYPE_UNSPECIFIED = 0;
// Proper noun.
PROPER = 1;
// Common noun (or noun compound).
COMMON = 2;
}
// The key of this entity in conversation entities.
// Can be used to retrieve the exact `Entity` this mention is attached to.
string entity_unique_id = 1;
// The type of the entity mention.
MentionType type = 2;
// Sentiment expressed for this mention of the entity.
SentimentData sentiment = 3;
}
// The data for an intent match.
// Represents an intent match for a text segment in the conversation. A text
// segment can be part of a sentence, a complete sentence, or an utterance
// with multiple sentences.
message IntentMatchData {
// The id of the matched intent.
// Can be used to retrieve the corresponding intent information.
// Matches `Intent.id`.
string intent_unique_id = 1;
}
// The data for a sentiment annotation.
message SentimentData {
// A non-negative number from 0 to infinity which represents the absolute
// magnitude of sentiment regardless of score.
float magnitude = 1;
// The sentiment score between -1.0 (negative) and 1.0 (positive).
float score = 2;
}
// The data for an issue match annotation.
message IssueMatchData {
// Information about the issue's assignment.
// Carries the issue resource name, score, and display name.
IssueAssignment issue_assignment = 1;
}
// The issue model resource.
message IssueModel {
option (google.api.resource) = {
type: "contactcenterinsights.googleapis.com/IssueModel"
pattern: "projects/{project}/locations/{location}/issueModels/{issue_model}"
};
// Configs for the input data used to create the issue model.
message InputDataConfig {
// Medium of conversations used in training data. This field is being
// deprecated. To specify the medium to be used in training a new issue
// model, set the `medium` field on `filter`.
Conversation.Medium medium = 1 [deprecated = true];
// Output only. Number of conversations used in training.
int64 training_conversations_count = 2
[(google.api.field_behavior) = OUTPUT_ONLY];
// A filter to reduce the conversations used for training the model to a
// specific subset.
string filter = 3;
}
// State of the model.
enum State {
// Unspecified. Zero value; never a real model state.
STATE_UNSPECIFIED = 0;
// Model is not deployed but is ready to deploy.
UNDEPLOYED = 1;
// Model is being deployed.
DEPLOYING = 2;
// Model is deployed and is ready to be used. A model can only be used in
// analysis if it's in this state.
DEPLOYED = 3;
// Model is being undeployed.
UNDEPLOYING = 4;
// Model is being deleted.
DELETING = 5;
}
// Immutable. The resource name of the issue model.
// Format:
// projects/{project}/locations/{location}/issueModels/{issue_model}
string name = 1 [(google.api.field_behavior) = IMMUTABLE];
// The representative name for the issue model.
string display_name = 2;
// Output only. The time at which this issue model was created.
google.protobuf.Timestamp create_time = 3
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. The most recent time at which the issue model was updated.
google.protobuf.Timestamp update_time = 4
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Number of issues in this issue model.
int64 issue_count = 8 [(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. State of the model.
State state = 5 [(google.api.field_behavior) = OUTPUT_ONLY];
// Configs for the input data that were used to create the issue model.
InputDataConfig input_data_config = 6;
// Output only. Immutable. The issue model's label statistics on its training
// data.
IssueModelLabelStats training_stats = 7 [
(google.api.field_behavior) = OUTPUT_ONLY,
(google.api.field_behavior) = IMMUTABLE
];
}
// The issue resource. An issue is a child resource of an issue model
// (see the resource pattern below).
message Issue {
option (google.api.resource) = {
type: "contactcenterinsights.googleapis.com/Issue"
pattern: "projects/{project}/locations/{location}/issueModels/{issue_model}/issues/{issue}"
};
// Immutable. The resource name of the issue.
// Format:
// projects/{project}/locations/{location}/issueModels/{issue_model}/issues/{issue}
string name = 1 [(google.api.field_behavior) = IMMUTABLE];
// The representative name for the issue.
string display_name = 2;
// Output only. The time at which this issue was created.
google.protobuf.Timestamp create_time = 3
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. The most recent time that this issue was updated.
google.protobuf.Timestamp update_time = 4
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. Resource names of the sample representative utterances that
// match to this issue.
// NOTE(review): field number 5 is unused in this message — confirm it is
// reserved upstream before reuse.
repeated string sample_utterances = 6
[(google.api.field_behavior) = OUTPUT_ONLY];
}
// Aggregated statistics about an issue model.
message IssueModelLabelStats {
  // Aggregated statistics about an issue.
  message IssueStats {
    // Issue resource.
    // Format:
    // projects/{project}/locations/{location}/issueModels/{issue_model}/issues/{issue}
    string issue = 1;

    // Number of conversations attached to the issue at this point in time.
    int64 labeled_conversations_count = 2;

    // Display name of the issue.
    string display_name = 3;
  }

  // Number of conversations the issue model has analyzed at this point in time.
  int64 analyzed_conversations_count = 1;

  // Number of analyzed conversations for which no issue was applicable at this
  // point in time.
  int64 unclassified_conversations_count = 2;

  // Statistics on each issue. Key is the issue's resource name.
  // NOTE: the scraped copy had dropped the map type parameters; restored from
  // the published schema.
  map<string, IssueStats> issue_stats = 3;
}
// The phrase matcher resource.
message PhraseMatcher {
option (google.api.resource) = {
type: "contactcenterinsights.googleapis.com/PhraseMatcher"
pattern: "projects/{project}/locations/{location}/phraseMatchers/{phrase_matcher}"
};
// Specifies how to combine each phrase match rule group to determine whether
// there is a match.
enum PhraseMatcherType {
// Unspecified. Zero value; never a real matcher type.
PHRASE_MATCHER_TYPE_UNSPECIFIED = 0;
// Must meet all phrase match rule groups or there is no match.
ALL_OF = 1;
// If any of the phrase match rule groups are met, there is a match.
ANY_OF = 2;
}
// The resource name of the phrase matcher.
// Format:
// projects/{project}/locations/{location}/phraseMatchers/{phrase_matcher}
string name = 1;
// Output only. Immutable. The revision ID of the phrase matcher.
// A new revision is committed whenever the matcher is changed, except when it
// is activated or deactivated. A server generated random ID will be used.
// Example: locations/global/phraseMatchers/my-first-matcher@1234567
string revision_id = 2 [
(google.api.field_behavior) = IMMUTABLE,
(google.api.field_behavior) = OUTPUT_ONLY
];
// The customized version tag to use for the phrase matcher. If not specified,
// it will default to `revision_id`.
string version_tag = 3;
// Output only. The timestamp of when the revision was created. It is also the
// create time when a new matcher is added.
google.protobuf.Timestamp revision_create_time = 4
[(google.api.field_behavior) = OUTPUT_ONLY];
// The human-readable name of the phrase matcher.
string display_name = 5;
// Required. The type of this phrase matcher.
PhraseMatcherType type = 6 [(google.api.field_behavior) = REQUIRED];
// Applies the phrase matcher only when it is active.
bool active = 7;
// A list of phrase match rule groups that are included in this matcher.
// Combined according to `type` (ALL_OF / ANY_OF).
repeated PhraseMatchRuleGroup phrase_match_rule_groups = 8;
// Output only. The most recent time at which the activation status was
// updated.
google.protobuf.Timestamp activation_update_time = 9
[(google.api.field_behavior) = OUTPUT_ONLY];
// The role whose utterances the phrase matcher should be matched
// against. If the role is ROLE_UNSPECIFIED it will be matched against any
// utterances in the transcript.
ConversationParticipant.Role role_match = 10;
// Output only. The most recent time at which the phrase matcher was updated.
google.protobuf.Timestamp update_time = 11
[(google.api.field_behavior) = OUTPUT_ONLY];
}
// A message representing a rule in the phrase matcher.
message PhraseMatchRuleGroup {
// Specifies how to combine each phrase match rule for whether there is a
// match.
enum PhraseMatchRuleGroupType {
// Unspecified. Zero value; never a real group type.
PHRASE_MATCH_RULE_GROUP_TYPE_UNSPECIFIED = 0;
// Must meet all phrase match rules or there is no match.
ALL_OF = 1;
// If any of the phrase match rules are met, there is a match.
ANY_OF = 2;
}
// Required. The type of this phrase match rule group.
PhraseMatchRuleGroupType type = 1 [(google.api.field_behavior) = REQUIRED];
// A list of phrase match rules that are included in this group.
// Combined according to `type` (ALL_OF / ANY_OF).
repeated PhraseMatchRule phrase_match_rules = 2;
}
// The data for a phrase match rule.
message PhraseMatchRule {
// Required. The phrase to be matched.
string query = 1 [(google.api.field_behavior) = REQUIRED];
// Specifies whether the phrase must be missing from the transcript segment or
// present in the transcript segment.
bool negated = 2;
// Provides additional information about the rule that specifies how to apply
// the rule.
// Not marked required; may be unset.
PhraseMatchRuleConfig config = 3;
}
// Configuration information of a phrase match rule.
message PhraseMatchRuleConfig {
// The configuration of the phrase match rule.
// Exact matching is currently the only configuration defined here.
oneof config {
// The configuration for the exact match rule.
ExactMatchConfig exact_match_config = 1;
}
}
// Exact match configuration.
message ExactMatchConfig {
// Whether to consider case sensitivity when performing an exact match.
// Proto3 default is false, i.e. matching is case-insensitive unless set.
bool case_sensitive = 1;
}
// The settings resource.
message Settings {
  option (google.api.resource) = {
    type: "contactcenterinsights.googleapis.com/Settings"
    pattern: "projects/{project}/locations/{location}/settings"
  };

  // Default configuration when creating Analyses in Insights.
  message AnalysisConfig {
    // Percentage of conversations created using Dialogflow runtime integration
    // to analyze automatically, between [0, 100].
    double runtime_integration_analysis_percentage = 1;

    // Percentage of conversations created using the UploadConversation endpoint
    // to analyze automatically, between [0, 100].
    double upload_conversation_analysis_percentage = 6;

    // To select the annotators to run and the phrase matchers to use
    // (if any). If not specified, all annotators will be run.
    AnnotatorSelector annotator_selector = 5;
  }

  // Immutable. The resource name of the settings resource.
  // Format:
  // projects/{project}/locations/{location}/settings
  string name = 1 [(google.api.field_behavior) = IMMUTABLE];

  // Output only. The time at which the settings was created.
  google.protobuf.Timestamp create_time = 2
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. The time at which the settings were last updated.
  google.protobuf.Timestamp update_time = 3
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // A language code to be applied to each transcript segment unless the segment
  // already specifies a language code. Language code defaults to "en-US" if it
  // is neither specified on the segment nor here.
  string language_code = 4;

  // The default TTL for newly-created conversations. If a conversation has a
  // specified expiration, that value will be used instead. Changing this
  // value will not change the expiration of existing conversations.
  // Conversations with no expire time persist until they are deleted.
  google.protobuf.Duration conversation_ttl = 5;

  // A map that maps a notification trigger to a Pub/Sub topic. Each time a
  // specified trigger occurs, Insights will notify the corresponding Pub/Sub
  // topic.
  //
  // Keys are notification triggers. Supported keys are:
  //
  // * "all-triggers": Notify each time any of the supported triggers occurs.
  // * "create-analysis": Notify each time an analysis is created.
  // * "create-conversation": Notify each time a conversation is created.
  // * "export-insights-data": Notify each time an export is complete.
  // * "update-conversation": Notify each time a conversation is updated via
  // UpdateConversation.
  //
  // Values are Pub/Sub topics. The format of each Pub/Sub topic is:
  // projects/{project}/topics/{topic}
  // NOTE: the scraped copy had dropped the map type parameters; restored as a
  // string-to-string map.
  map<string, string> pubsub_notification_settings = 6;

  // Default analysis settings.
  AnalysisConfig analysis_config = 7;

  // Default DLP redaction resources to be applied while ingesting
  // conversations.
  RedactionConfig redaction_config = 10;
}
// DLP resources used for redaction while ingesting conversations.
// Referenced by `Settings.redaction_config` as the default configuration.
message RedactionConfig {
// The fully-qualified DLP deidentify template resource name.
// Format:
// `projects/{project}/deidentifyTemplates/{template}`
string deidentify_template = 1;
// The fully-qualified DLP inspect template resource name.
// Format:
// `projects/{project}/inspectTemplates/{template}`
string inspect_template = 2;
}
// An annotation that was generated during the customer and agent interaction.
message RuntimeAnnotation {
// The data in the annotation.
// Exactly one of the fields below is set for a given annotation.
oneof data {
// Agent Assist Article Suggestion data.
ArticleSuggestionData article_suggestion = 6;
// Agent Assist FAQ answer data.
FaqAnswerData faq_answer = 7;
// Agent Assist Smart Reply data.
SmartReplyData smart_reply = 8;
// Agent Assist Smart Compose suggestion data.
SmartComposeSuggestionData smart_compose_suggestion = 9;
// Dialogflow interaction data.
DialogflowInteractionData dialogflow_interaction = 10;
// Conversation summarization suggestion data.
ConversationSummarizationSuggestionData
conversation_summarization_suggestion = 12;
}
// The unique identifier of the annotation.
// Format:
// projects/{project}/locations/{location}/conversationDatasets/{dataset}/conversationDataItems/{data_item}/conversationAnnotations/{annotation}
string annotation_id = 1;
// The time at which this annotation was created.
google.protobuf.Timestamp create_time = 2;
// The boundary in the conversation where the annotation starts, inclusive.
AnnotationBoundary start_boundary = 3;
// The boundary in the conversation where the annotation ends, inclusive.
AnnotationBoundary end_boundary = 4;
// The feedback that the customer has about the answer in `data`.
AnswerFeedback answer_feedback = 5;
}
// The feedback that the customer has about a certain answer in the
// conversation.
message AnswerFeedback {
  // The correctness level of an answer.
  enum CorrectnessLevel {
    // Correctness level unspecified. Default value; carries no feedback.
    CORRECTNESS_LEVEL_UNSPECIFIED = 0;

    // Answer is totally wrong.
    NOT_CORRECT = 1;

    // Answer is partially correct.
    PARTIALLY_CORRECT = 2;

    // Answer is fully correct.
    FULLY_CORRECT = 3;
  }

  // The correctness level of an answer.
  CorrectnessLevel correctness_level = 1;

  // Indicates whether an answer or item was clicked by the human agent.
  bool clicked = 2;

  // Indicates whether an answer or item was displayed to the human agent in the
  // agent desktop UI.
  bool displayed = 3;
}
// Agent Assist Article Suggestion data.
message ArticleSuggestionData {
  // Article title.
  string title = 1;

  // Article URI.
  string uri = 2;

  // The system's confidence score that this article is a good match for this
  // conversation, ranging from 0.0 (completely uncertain) to 1.0 (completely
  // certain).
  float confidence_score = 3;

  // Map that contains metadata about the Article Suggestion and the document
  // that it originates from.
  map<string, string> metadata = 4;

  // The name of the answer record.
  // Format:
  // projects/{project}/locations/{location}/answerRecords/{answer_record}
  string query_record = 5;

  // The knowledge document that this answer was extracted from.
  // Format:
  // projects/{project}/knowledgeBases/{knowledge_base}/documents/{document}
  string source = 6;
}
// Agent Assist frequently-asked-question answer data.
message FaqAnswerData {
  // The piece of text from the `source` knowledge base document.
  string answer = 1;

  // The system's confidence score that this answer is a good match for this
  // conversation, ranging from 0.0 (completely uncertain) to 1.0 (completely
  // certain).
  float confidence_score = 2;

  // The corresponding FAQ question.
  string question = 3;

  // Map that contains metadata about the FAQ answer and the document that
  // it originates from.
  map<string, string> metadata = 4;

  // The name of the answer record.
  // Format:
  // projects/{project}/locations/{location}/answerRecords/{answer_record}
  string query_record = 5;

  // The knowledge document that this answer was extracted from.
  // Format:
  // projects/{project}/knowledgeBases/{knowledge_base}/documents/{document}.
  string source = 6;
}
// Agent Assist Smart Reply data.
message SmartReplyData {
  // The content of the reply.
  string reply = 1;

  // The system's confidence score that this reply is a good match for this
  // conversation, ranging from 0.0 (completely uncertain) to 1.0 (completely
  // certain).
  double confidence_score = 2;

  // Map that contains metadata about the Smart Reply and the document from
  // which it originates.
  map<string, string> metadata = 3;

  // The name of the answer record.
  // Format:
  // projects/{project}/locations/{location}/answerRecords/{answer_record}
  string query_record = 4;
}
// Agent Assist Smart Compose suggestion data.
message SmartComposeSuggestionData {
  // The content of the suggestion.
  string suggestion = 1;

  // The system's confidence score that this suggestion is a good match for this
  // conversation, ranging from 0.0 (completely uncertain) to 1.0 (completely
  // certain).
  double confidence_score = 2;

  // Map that contains metadata about the Smart Compose suggestion and the
  // document from which it originates.
  map<string, string> metadata = 3;

  // The name of the answer record.
  // Format:
  // projects/{project}/locations/{location}/answerRecords/{answer_record}
  string query_record = 4;
}
// Dialogflow interaction data.
message DialogflowInteractionData {
  // The Dialogflow intent resource path. Format:
  // projects/{project}/agent/{agent}/intents/{intent}
  string dialogflow_intent_id = 1;

  // The confidence of the match ranging from 0.0 (completely uncertain) to 1.0
  // (completely certain).
  float confidence = 2;
}
// Conversation summarization suggestion data.
message ConversationSummarizationSuggestionData {
  // The summarization content that is concatenated into one string.
  string text = 1;

  // The summarization content that is divided into sections. The key is the
  // section's name and the value is the section's content. There is no
  // specific format for the key or value.
  map<string, string> text_sections = 5;

  // The confidence score of the summarization.
  float confidence = 2;

  // A map that contains metadata about the summarization and the document
  // from which it originates.
  map<string, string> metadata = 3;

  // The name of the answer record.
  // Format:
  // projects/{project}/locations/{location}/answerRecords/{answer_record}
  string answer_record = 4;

  // The name of the model that generates this summary.
  // Format:
  // projects/{project}/locations/{location}/conversationModels/{conversation_model}
  string conversation_model = 6;
}
// The call participant speaking for a given utterance.
message ConversationParticipant {
  // The role of the participant.
  enum Role {
    // Participant's role is not set.
    ROLE_UNSPECIFIED = 0;

    // Participant is a human agent.
    HUMAN_AGENT = 1;

    // Participant is an automated agent.
    AUTOMATED_AGENT = 2;

    // Participant is an end user who conversed with the contact center.
    END_USER = 3;

    // Participant is either a human or automated agent.
    ANY_AGENT = 4;
  }

  // The identity of the participant. At most one of these fields is set.
  oneof participant {
    // The name of the participant provided by Dialogflow. Format:
    // projects/{project}/locations/{location}/conversations/{conversation}/participants/{participant}
    string dialogflow_participant_name = 5 [(google.api.resource_reference) = {
      type: "dialogflow.googleapis.com/Participant"
    }];

    // A user-specified ID representing the participant.
    string user_id = 6;
  }

  // Deprecated. Use `dialogflow_participant_name` instead.
  // The name of the Dialogflow participant. Format:
  // projects/{project}/locations/{location}/conversations/{conversation}/participants/{participant}
  string dialogflow_participant = 1 [deprecated = true];

  // Obfuscated user ID from Dialogflow.
  string obfuscated_external_user_id = 3;

  // The role of the participant.
  Role role = 2;
}
// The View resource.
message View {
  option (google.api.resource) = {
    type: "contactcenterinsights.googleapis.com/View"
    pattern: "projects/{project}/locations/{location}/views/{view}"
  };

  // Immutable. The resource name of the view.
  // Format:
  // projects/{project}/locations/{location}/views/{view}
  string name = 1 [(google.api.field_behavior) = IMMUTABLE];

  // The human-readable display name of the view.
  string display_name = 2;

  // Output only. The time at which this view was created.
  google.protobuf.Timestamp create_time = 3
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. The most recent time at which the view was updated.
  google.protobuf.Timestamp update_time = 4
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // String with specific view properties, must be non-empty.
  string value = 5;
}
// Selector of all available annotators and phrase matchers to run.
message AnnotatorSelector {
  // Configuration for summarization.
  message SummarizationConfig {
    // Summarization model to use, if `conversation_profile` is not used.
    enum SummarizationModel {
      // Unspecified summarization model.
      SUMMARIZATION_MODEL_UNSPECIFIED = 0;

      // The Insights baseline model.
      BASELINE_MODEL = 1;
    }

    // Summarization must use either a preexisting conversation profile or one
    // of the supported default models; the two options are mutually exclusive.
    oneof model_source {
      // Resource name of the Dialogflow conversation profile.
      // Format:
      // projects/{project}/locations/{location}/conversationProfiles/{conversation_profile}
      string conversation_profile = 1 [(google.api.resource_reference) = {
        type: "dialogflow.googleapis.com/ConversationProfile"
      }];

      // Default summarization model to be used.
      SummarizationModel summarization_model = 2;
    }
  }

  // Whether to run the interruption annotator.
  bool run_interruption_annotator = 1;

  // Whether to run the silence annotator.
  bool run_silence_annotator = 2;

  // Whether to run the active phrase matcher annotator(s).
  bool run_phrase_matcher_annotator = 3;

  // The list of phrase matchers to run. If not provided, all active phrase
  // matchers will be used. If inactive phrase matchers are provided, they will
  // not be used. Phrase matchers will be run only if
  // run_phrase_matcher_annotator is set to true. Format:
  // projects/{project}/locations/{location}/phraseMatchers/{phrase_matcher}
  repeated string phrase_matchers = 4 [(google.api.resource_reference) = {
    type: "contactcenterinsights.googleapis.com/PhraseMatcher"
  }];

  // Whether to run the sentiment annotator.
  bool run_sentiment_annotator = 5;

  // Whether to run the entity annotator.
  bool run_entity_annotator = 6;

  // Whether to run the intent annotator.
  bool run_intent_annotator = 7;

  // Whether to run the issue model annotator. A model should have already been
  // deployed for this to take effect.
  bool run_issue_model_annotator = 8;

  // The issue model to run. If not provided, the most recently deployed topic
  // model will be used. The provided issue model will only be used for
  // inference if the issue model is deployed and if run_issue_model_annotator
  // is set to true. If more than one issue model is provided, only the first
  // provided issue model will be used for inference.
  repeated string issue_models = 10 [(google.api.resource_reference) = {
    type: "contactcenterinsights.googleapis.com/IssueModel"
  }];

  // Whether to run the summarization annotator.
  bool run_summarization_annotator = 9;

  // Configuration for the summarization annotator.
  SummarizationConfig summarization_config = 11;
}