
google.cloud.visionai.v1.platform.proto Maven / Gradle / Ivy
// Copyright 2024 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package google.cloud.visionai.v1;
import "google/api/annotations.proto";
import "google/api/client.proto";
import "google/api/field_behavior.proto";
import "google/api/resource.proto";
import "google/cloud/visionai/v1/annotations.proto";
import "google/cloud/visionai/v1/common.proto";
import "google/longrunning/operations.proto";
import "google/protobuf/duration.proto";
import "google/protobuf/empty.proto";
import "google/protobuf/field_mask.proto";
import "google/protobuf/struct.proto";
import "google/protobuf/timestamp.proto";
option csharp_namespace = "Google.Cloud.VisionAI.V1";
option go_package = "cloud.google.com/go/visionai/apiv1/visionaipb;visionaipb";
option java_multiple_files = true;
option java_outer_classname = "PlatformProto";
option java_package = "com.google.cloud.visionai.v1";
option php_namespace = "Google\\Cloud\\VisionAI\\V1";
option ruby_package = "Google::Cloud::VisionAI::V1";
// Service describing handlers for App Platform resources (Applications,
// Instances, Drafts, and Processors).
service AppPlatform {
  option (google.api.default_host) = "visionai.googleapis.com";
  option (google.api.oauth_scopes) =
      "https://www.googleapis.com/auth/cloud-platform";

  // Lists Applications in a given project and location.
  rpc ListApplications(ListApplicationsRequest)
      returns (ListApplicationsResponse) {
    option (google.api.http) = {
      get: "/v1/{parent=projects/*/locations/*}/applications"
    };
    option (google.api.method_signature) = "parent";
  }

  // Gets details of a single Application.
  rpc GetApplication(GetApplicationRequest) returns (Application) {
    option (google.api.http) = {
      get: "/v1/{name=projects/*/locations/*/applications/*}"
    };
    option (google.api.method_signature) = "name";
  }

  // Creates a new Application in a given project and location.
  rpc CreateApplication(CreateApplicationRequest)
      returns (google.longrunning.Operation) {
    option (google.api.http) = {
      post: "/v1/{parent=projects/*/locations/*}/applications"
      body: "application"
    };
    option (google.api.method_signature) = "parent,application";
    option (google.longrunning.operation_info) = {
      response_type: "Application"
      metadata_type: "OperationMetadata"
    };
  }

  // Updates the parameters of a single Application.
  rpc UpdateApplication(UpdateApplicationRequest)
      returns (google.longrunning.Operation) {
    option (google.api.http) = {
      patch: "/v1/{application.name=projects/*/locations/*/applications/*}"
      body: "application"
    };
    option (google.api.method_signature) = "application,update_mask";
    option (google.longrunning.operation_info) = {
      response_type: "Application"
      metadata_type: "OperationMetadata"
    };
  }

  // Deletes a single Application.
  rpc DeleteApplication(DeleteApplicationRequest)
      returns (google.longrunning.Operation) {
    option (google.api.http) = {
      delete: "/v1/{name=projects/*/locations/*/applications/*}"
    };
    option (google.api.method_signature) = "name";
    option (google.longrunning.operation_info) = {
      response_type: "google.protobuf.Empty"
      metadata_type: "OperationMetadata"
    };
  }

  // Deploys a single Application.
  rpc DeployApplication(DeployApplicationRequest)
      returns (google.longrunning.Operation) {
    option (google.api.http) = {
      post: "/v1/{name=projects/*/locations/*/applications/*}:deploy"
      body: "*"
    };
    option (google.api.method_signature) = "name";
    option (google.longrunning.operation_info) = {
      response_type: "DeployApplicationResponse"
      metadata_type: "OperationMetadata"
    };
  }

  // Undeploys a single Application.
  rpc UndeployApplication(UndeployApplicationRequest)
      returns (google.longrunning.Operation) {
    option (google.api.http) = {
      post: "/v1/{name=projects/*/locations/*/applications/*}:undeploy"
      body: "*"
    };
    option (google.api.method_signature) = "name";
    option (google.longrunning.operation_info) = {
      response_type: "UndeployApplicationResponse"
      metadata_type: "OperationMetadata"
    };
  }

  // Adds target stream input to the Application.
  // If the Application is deployed, the corresponding new Application instance
  // will be created. If the stream has already been in the Application, the RPC
  // will fail.
  rpc AddApplicationStreamInput(AddApplicationStreamInputRequest)
      returns (google.longrunning.Operation) {
    option (google.api.http) = {
      post: "/v1/{name=projects/*/locations/*/applications/*}:addStreamInput"
      body: "*"
    };
    option (google.api.method_signature) = "name";
    option (google.longrunning.operation_info) = {
      response_type: "AddApplicationStreamInputResponse"
      metadata_type: "OperationMetadata"
    };
  }

  // Removes target stream input from the Application. If the Application is
  // deployed, the corresponding instance will be deleted. If the stream
  // is not in the Application, the RPC will fail.
  rpc RemoveApplicationStreamInput(RemoveApplicationStreamInputRequest)
      returns (google.longrunning.Operation) {
    option (google.api.http) = {
      post: "/v1/{name=projects/*/locations/*/applications/*}:removeStreamInput"
      body: "*"
    };
    option (google.api.method_signature) = "name";
    option (google.longrunning.operation_info) = {
      response_type: "RemoveApplicationStreamInputResponse"
      metadata_type: "OperationMetadata"
    };
  }

  // Updates target stream input of the Application. If the Application is
  // deployed, the corresponding instance will be deployed. For
  // CreateOrUpdate behavior, set allow_missing to true.
  rpc UpdateApplicationStreamInput(UpdateApplicationStreamInputRequest)
      returns (google.longrunning.Operation) {
    option (google.api.http) = {
      post: "/v1/{name=projects/*/locations/*/applications/*}:updateStreamInput"
      body: "*"
    };
    option (google.api.method_signature) = "name";
    option (google.longrunning.operation_info) = {
      response_type: "UpdateApplicationStreamInputResponse"
      metadata_type: "OperationMetadata"
    };
  }

  // Lists Instances in a given project and location.
  rpc ListInstances(ListInstancesRequest) returns (ListInstancesResponse) {
    option (google.api.http) = {
      get: "/v1/{parent=projects/*/locations/*/applications/*}/instances"
    };
    option (google.api.method_signature) = "parent";
  }

  // Gets details of a single Instance.
  rpc GetInstance(GetInstanceRequest) returns (Instance) {
    option (google.api.http) = {
      get: "/v1/{name=projects/*/locations/*/applications/*/instances/*}"
    };
    option (google.api.method_signature) = "name";
  }

  // Creates application instances for the target Application.
  // If the Application is deployed, the corresponding new Application
  // instances will be created. If any requested instance already exists in the
  // Application, the RPC will fail.
  rpc CreateApplicationInstances(CreateApplicationInstancesRequest)
      returns (google.longrunning.Operation) {
    option (google.api.http) = {
      post: "/v1/{name=projects/*/locations/*/applications/*}:createApplicationInstances"
      body: "*"
    };
    option (google.api.method_signature) = "name";
    option (google.longrunning.operation_info) = {
      response_type: "CreateApplicationInstancesResponse"
      metadata_type: "OperationMetadata"
    };
  }

  // Deletes target application instances from the Application. If the
  // Application is deployed, the corresponding instances will be deleted. If
  // an instance is not in the Application, the RPC will fail.
  rpc DeleteApplicationInstances(DeleteApplicationInstancesRequest)
      returns (google.longrunning.Operation) {
    option (google.api.http) = {
      post: "/v1/{name=projects/*/locations/*/applications/*}:deleteApplicationInstances"
      body: "*"
    };
    option (google.api.method_signature) = "name";
    // NOTE(review): response_type is "Instance" rather than
    // "DeleteApplicationInstancesResponse" (which exists in this file) —
    // confirm against the published API surface before changing.
    option (google.longrunning.operation_info) = {
      response_type: "Instance"
      metadata_type: "OperationMetadata"
    };
  }

  // Updates target application instances of the Application. If the
  // Application is deployed, the corresponding instances will be updated.
  rpc UpdateApplicationInstances(UpdateApplicationInstancesRequest)
      returns (google.longrunning.Operation) {
    option (google.api.http) = {
      post: "/v1/{name=projects/*/locations/*/applications/*}:updateApplicationInstances"
      body: "*"
    };
    option (google.api.method_signature) = "name,application_instances";
    option (google.longrunning.operation_info) = {
      response_type: "UpdateApplicationInstancesResponse"
      metadata_type: "OperationMetadata"
    };
  }

  // Lists Drafts in a given project and location.
  rpc ListDrafts(ListDraftsRequest) returns (ListDraftsResponse) {
    option (google.api.http) = {
      get: "/v1/{parent=projects/*/locations/*/applications/*}/drafts"
    };
    option (google.api.method_signature) = "parent";
  }

  // Gets details of a single Draft.
  rpc GetDraft(GetDraftRequest) returns (Draft) {
    option (google.api.http) = {
      get: "/v1/{name=projects/*/locations/*/applications/*/drafts/*}"
    };
    option (google.api.method_signature) = "name";
  }

  // Creates a new Draft in a given project and location.
  rpc CreateDraft(CreateDraftRequest) returns (google.longrunning.Operation) {
    option (google.api.http) = {
      post: "/v1/{parent=projects/*/locations/*/applications/*}/drafts"
      body: "draft"
    };
    option (google.api.method_signature) = "parent,draft,draft_id";
    option (google.longrunning.operation_info) = {
      response_type: "Draft"
      metadata_type: "OperationMetadata"
    };
  }

  // Updates the parameters of a single Draft.
  rpc UpdateDraft(UpdateDraftRequest) returns (google.longrunning.Operation) {
    option (google.api.http) = {
      patch: "/v1/{draft.name=projects/*/locations/*/applications/*/drafts/*}"
      body: "draft"
    };
    option (google.api.method_signature) = "draft,update_mask";
    option (google.longrunning.operation_info) = {
      response_type: "Draft"
      metadata_type: "OperationMetadata"
    };
  }

  // Deletes a single Draft.
  rpc DeleteDraft(DeleteDraftRequest) returns (google.longrunning.Operation) {
    option (google.api.http) = {
      delete: "/v1/{name=projects/*/locations/*/applications/*/drafts/*}"
    };
    option (google.api.method_signature) = "name";
    option (google.longrunning.operation_info) = {
      response_type: "google.protobuf.Empty"
      metadata_type: "OperationMetadata"
    };
  }

  // Lists Processors in a given project and location.
  rpc ListProcessors(ListProcessorsRequest) returns (ListProcessorsResponse) {
    option (google.api.http) = {
      get: "/v1/{parent=projects/*/locations/*}/processors"
    };
    option (google.api.method_signature) = "parent";
  }

  // ListPrebuiltProcessors is a custom pass-through verb that Lists Prebuilt
  // Processors.
  rpc ListPrebuiltProcessors(ListPrebuiltProcessorsRequest)
      returns (ListPrebuiltProcessorsResponse) {
    option (google.api.http) = {
      post: "/v1/{parent=projects/*/locations/*}/processors:prebuilt"
      body: "*"
    };
    option (google.api.method_signature) = "parent";
  }

  // Gets details of a single Processor.
  rpc GetProcessor(GetProcessorRequest) returns (Processor) {
    option (google.api.http) = {
      get: "/v1/{name=projects/*/locations/*/processors/*}"
    };
    option (google.api.method_signature) = "name";
  }

  // Creates a new Processor in a given project and location.
  rpc CreateProcessor(CreateProcessorRequest)
      returns (google.longrunning.Operation) {
    option (google.api.http) = {
      post: "/v1/{parent=projects/*/locations/*}/processors"
      body: "processor"
    };
    option (google.api.method_signature) = "parent,processor,processor_id";
    option (google.longrunning.operation_info) = {
      response_type: "Processor"
      metadata_type: "OperationMetadata"
    };
  }

  // Updates the parameters of a single Processor.
  rpc UpdateProcessor(UpdateProcessorRequest)
      returns (google.longrunning.Operation) {
    option (google.api.http) = {
      patch: "/v1/{processor.name=projects/*/locations/*/processors/*}"
      body: "processor"
    };
    option (google.api.method_signature) = "processor,update_mask";
    option (google.longrunning.operation_info) = {
      response_type: "Processor"
      metadata_type: "OperationMetadata"
    };
  }

  // Deletes a single Processor.
  rpc DeleteProcessor(DeleteProcessorRequest)
      returns (google.longrunning.Operation) {
    option (google.api.http) = {
      delete: "/v1/{name=projects/*/locations/*/processors/*}"
    };
    option (google.api.method_signature) = "name";
    option (google.longrunning.operation_info) = {
      response_type: "google.protobuf.Empty"
      metadata_type: "OperationMetadata"
    };
  }
}
// All the supported model types in Vision AI App Platform.
// Numeric values are part of the wire contract and must never be changed.
enum ModelType {
  // Model Type UNSPECIFIED (default; never a real model type).
  MODEL_TYPE_UNSPECIFIED = 0;

  // Model Type Image Classification.
  IMAGE_CLASSIFICATION = 1;

  // Model Type Object Detection.
  OBJECT_DETECTION = 2;

  // Model Type Video Classification.
  VIDEO_CLASSIFICATION = 3;

  // Model Type Object Tracking.
  VIDEO_OBJECT_TRACKING = 4;

  // Model Type Action Recognition.
  VIDEO_ACTION_RECOGNITION = 5;

  // Model Type Occupancy Counting.
  OCCUPANCY_COUNTING = 6;

  // Model Type Person Blur.
  PERSON_BLUR = 7;

  // Model Type Vertex Custom.
  VERTEX_CUSTOM = 8;

  // Model Type Product Recognizer.
  PRODUCT_RECOGNIZER = 9;

  // Model Type Tag Recognizer.
  TAG_RECOGNIZER = 10;

  // Model Type SynthID.
  // NOTE(review): values 11-14 are skipped with no `reserved` statement —
  // presumably held for removed or unreleased types; confirm and reserve.
  SYNTH_ID = 15;
}
// Represents a hardware accelerator type.
// Declaration order does not follow numeric order (A100 = 8 precedes the
// TPU values 6 and 7); the numeric values are the wire contract.
enum AcceleratorType {
  // Unspecified accelerator type, which means no accelerator.
  ACCELERATOR_TYPE_UNSPECIFIED = 0;

  // Nvidia Tesla K80 GPU.
  NVIDIA_TESLA_K80 = 1;

  // Nvidia Tesla P100 GPU.
  NVIDIA_TESLA_P100 = 2;

  // Nvidia Tesla V100 GPU.
  NVIDIA_TESLA_V100 = 3;

  // Nvidia Tesla P4 GPU.
  NVIDIA_TESLA_P4 = 4;

  // Nvidia Tesla T4 GPU.
  NVIDIA_TESLA_T4 = 5;

  // Nvidia Tesla A100 GPU.
  NVIDIA_TESLA_A100 = 8;

  // TPU v2.
  TPU_V2 = 6;

  // TPU v3.
  TPU_V3 = 7;
}
// All supported data types.
// Declaration order does not follow numeric order (IMAGE = 3 precedes
// PROTO = 2); the numeric values are the wire contract.
enum DataType {
  // The default value of DataType.
  DATA_TYPE_UNSPECIFIED = 0;

  // Video data type like H264.
  VIDEO = 1;

  // Image data type.
  IMAGE = 3;

  // Protobuf data type, usually used for general data blob.
  PROTO = 2;

  // A placeholder data type, applicable for the universal input processor which
  // supports any data type. This will be instantiated and replaced by a
  // concrete underlying `DataType` during instance deployment.
  PLACEHOLDER = 4;
}
// Message for DeleteApplicationInstances Response.
message DeleteApplicationInstancesResponse {}

// Message for CreateApplicationInstances Response.
message CreateApplicationInstancesResponse {}

// Message for UpdateApplicationInstances Response.
message UpdateApplicationInstancesResponse {}
// Message for creating application instances under an Application.
message CreateApplicationInstancesRequest {
  // Required. The name of the target application.
  // Format:
  // "projects/{project}/locations/{location}/applications/{application}"
  string name = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      type: "visionai.googleapis.com/Application"
    }
  ];

  // Required. The resources being created.
  repeated ApplicationInstance application_instances = 2
      [(google.api.field_behavior) = REQUIRED];

  // Optional. An optional request ID to identify requests. Specify a unique
  // request ID so that if you must retry your request, the server will know to
  // ignore the request if it has already been completed. The server will
  // guarantee that for at least 60 minutes since the first request.
  //
  // For example, consider a situation where you make an initial request and
  // the request times out. If you make the request again with the same request
  // ID, the server can check if original operation with the same request ID
  // was received, and if so, will ignore the second request. This prevents
  // clients from accidentally creating duplicate commitments.
  //
  // The request ID must be a valid UUID with the exception that zero UUID is
  // not supported (00000000-0000-0000-0000-000000000000).
  //
  // NOTE(review): field number 3 is skipped with no `reserved` statement —
  // if it was previously used, it should be reserved; confirm.
  string request_id = 4 [(google.api.field_behavior) = OPTIONAL];
}
// Message for deleting application instances from an Application.
message DeleteApplicationInstancesRequest {
  // Required. The name of the target application.
  // Format:
  // "projects/{project}/locations/{location}/applications/{application}"
  string name = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      type: "visionai.googleapis.com/Application"
    }
  ];

  // Required. IDs of the instances to delete.
  // NOTE(review): the resource_reference annotation is applied to bare IDs
  // rather than full resource names — confirm this matches server behavior.
  repeated string instance_ids = 2 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      type: "visionai.googleapis.com/Instance"
    }
  ];

  // Optional. An optional request ID to identify requests. Specify a unique
  // request ID so that if you must retry your request, the server will know to
  // ignore the request if it has already been completed. The server will
  // guarantee that for at least 60 minutes since the first request.
  //
  // For example, consider a situation where you make an initial request and
  // the request times out. If you make the request again with the same request
  // ID, the server can check if original operation with the same request ID
  // was received, and if so, will ignore the second request. This prevents
  // clients from accidentally creating duplicate commitments.
  //
  // The request ID must be a valid UUID with the exception that zero UUID is
  // not supported (00000000-0000-0000-0000-000000000000).
  string request_id = 3 [(google.api.field_behavior) = OPTIONAL];
}
// RPC Response Messages (empty placeholders for long-running operations).

// Message for DeployApplication Response.
message DeployApplicationResponse {}

// Message for UndeployApplication Response.
message UndeployApplicationResponse {}

// Message for RemoveApplicationStreamInput Response.
message RemoveApplicationStreamInputResponse {}

// Message for AddApplicationStreamInput Response.
message AddApplicationStreamInputResponse {}

// Message for UpdateApplicationStreamInput Response.
message UpdateApplicationStreamInputResponse {}
// Message for requesting list of Applications.
message ListApplicationsRequest {
  // Required. Parent value for ListApplicationsRequest.
  string parent = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      child_type: "visionai.googleapis.com/Application"
    }
  ];

  // Requested page size. Server may return fewer items than requested.
  // If unspecified, server will pick an appropriate default.
  int32 page_size = 2;

  // A token identifying a page of results the server should return.
  string page_token = 3;

  // Filtering results.
  string filter = 4;

  // Hint for how to order the results.
  string order_by = 5;
}
// Message for response to listing Applications.
message ListApplicationsResponse {
  // The list of Application.
  repeated Application applications = 1;

  // A token that can be sent as `page_token` in a subsequent request to
  // retrieve the next page of results.
  string next_page_token = 2;

  // Locations that could not be reached.
  repeated string unreachable = 3;
}
// Message for getting an Application.
message GetApplicationRequest {
  // Required. Name of the resource.
  string name = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      type: "visionai.googleapis.com/Application"
    }
  ];
}
// Message for creating an Application.
message CreateApplicationRequest {
  // Required. Value for parent.
  string parent = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      child_type: "visionai.googleapis.com/Application"
    }
  ];

  // Required. Id of the requesting object.
  string application_id = 2 [(google.api.field_behavior) = REQUIRED];

  // Required. The resource being created.
  Application application = 3 [(google.api.field_behavior) = REQUIRED];

  // Optional. An optional request ID to identify requests. Specify a unique
  // request ID so that if you must retry your request, the server will know to
  // ignore the request if it has already been completed. The server will
  // guarantee that for at least 60 minutes since the first request.
  //
  // For example, consider a situation where you make an initial request and
  // the request times out. If you make the request again with the same request
  // ID, the server can check if original operation with the same request ID
  // was received, and if so, will ignore the second request. This prevents
  // clients from accidentally creating duplicate commitments.
  //
  // The request ID must be a valid UUID with the exception that zero UUID is
  // not supported (00000000-0000-0000-0000-000000000000).
  string request_id = 4 [(google.api.field_behavior) = OPTIONAL];
}
// Message for updating an Application.
message UpdateApplicationRequest {
  // Optional. Field mask is used to specify the fields to be overwritten in the
  // Application resource by the update.
  // The fields specified in the update_mask are relative to the resource, not
  // the full request. A field will be overwritten if it is in the mask. If the
  // user does not provide a mask then all fields will be overwritten.
  google.protobuf.FieldMask update_mask = 1
      [(google.api.field_behavior) = OPTIONAL];

  // Required. The resource being updated.
  Application application = 2 [(google.api.field_behavior) = REQUIRED];

  // Optional. An optional request ID to identify requests. Specify a unique
  // request ID so that if you must retry your request, the server will know to
  // ignore the request if it has already been completed. The server will
  // guarantee that for at least 60 minutes since the first request.
  //
  // For example, consider a situation where you make an initial request and
  // the request times out. If you make the request again with the same request
  // ID, the server can check if original operation with the same request ID
  // was received, and if so, will ignore the second request. This prevents
  // clients from accidentally creating duplicate commitments.
  //
  // The request ID must be a valid UUID with the exception that zero UUID is
  // not supported (00000000-0000-0000-0000-000000000000).
  string request_id = 3 [(google.api.field_behavior) = OPTIONAL];
}
// Message for deleting an Application.
message DeleteApplicationRequest {
  // Required. Name of the resource.
  string name = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      type: "visionai.googleapis.com/Application"
    }
  ];

  // Optional. An optional request ID to identify requests. Specify a unique
  // request ID so that if you must retry your request, the server will know to
  // ignore the request if it has already been completed. The server will
  // guarantee that for at least 60 minutes after the first request.
  //
  // For example, consider a situation where you make an initial request and
  // the request times out. If you make the request again with the same request
  // ID, the server can check if original operation with the same request ID
  // was received, and if so, will ignore the second request. This prevents
  // clients from accidentally creating duplicate commitments.
  //
  // The request ID must be a valid UUID with the exception that zero UUID is
  // not supported (00000000-0000-0000-0000-000000000000).
  string request_id = 2 [(google.api.field_behavior) = OPTIONAL];

  // Optional. If set to true, any instances and drafts from this application
  // will also be deleted. (Otherwise, the request will only work if the
  // application has no instances and drafts.)
  bool force = 3 [(google.api.field_behavior) = OPTIONAL];
}
// Message for deploying an Application.
message DeployApplicationRequest {
  // Required. The name of the application to deploy.
  // Format:
  // "projects/{project}/locations/{location}/applications/{application}"
  string name = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      type: "visionai.googleapis.com/Application"
    }
  ];

  // If set, validate the request and preview the application graph, but do not
  // actually deploy it.
  bool validate_only = 2;

  // Optional. An optional request ID to identify requests. Specify a unique
  // request ID so that if you must retry your request, the server will know to
  // ignore the request if it has already been completed. The server will
  // guarantee that for at least 60 minutes since the first request.
  //
  // For example, consider a situation where you make an initial request and
  // the request times out. If you make the request again with the same request
  // ID, the server can check if original operation with the same request ID
  // was received, and if so, will ignore the second request. This prevents
  // clients from accidentally creating duplicate commitments.
  //
  // The request ID must be a valid UUID with the exception that zero UUID is
  // not supported (00000000-0000-0000-0000-000000000000).
  string request_id = 3 [(google.api.field_behavior) = OPTIONAL];

  // Optional. Whether or not to enable monitoring for the application on
  // deployment.
  bool enable_monitoring = 4 [(google.api.field_behavior) = OPTIONAL];
}
// Message for undeploying an Application.
message UndeployApplicationRequest {
  // Required. The name of the application to undeploy.
  // Format:
  // "projects/{project}/locations/{location}/applications/{application}"
  string name = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      type: "visionai.googleapis.com/Application"
    }
  ];

  // Optional. An optional request ID to identify requests. Specify a unique
  // request ID so that if you must retry your request, the server will know to
  // ignore the request if it has already been completed. The server will
  // guarantee that for at least 60 minutes since the first request.
  //
  // For example, consider a situation where you make an initial request and
  // the request times out. If you make the request again with the same request
  // ID, the server can check if original operation with the same request ID
  // was received, and if so, will ignore the second request. This prevents
  // clients from accidentally creating duplicate commitments.
  //
  // The request ID must be a valid UUID with the exception that zero UUID is
  // not supported (00000000-0000-0000-0000-000000000000).
  string request_id = 2 [(google.api.field_behavior) = OPTIONAL];
}
// Message about a single stream input config.
message ApplicationStreamInput {
  // The stream, together with its annotations, used as an application input.
  StreamWithAnnotation stream_with_annotation = 1;
}
// Message for adding stream input to an Application.
message AddApplicationStreamInputRequest {
  // Required. The name of the target application.
  // Format:
  // "projects/{project}/locations/{location}/applications/{application}"
  string name = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      type: "visionai.googleapis.com/Application"
    }
  ];

  // The stream inputs to add, the stream resource name is the key of each
  // StreamInput, and it must be unique within each application.
  repeated ApplicationStreamInput application_stream_inputs = 2;

  // Optional. An optional request ID to identify requests. Specify a unique
  // request ID so that if you must retry your request, the server will know to
  // ignore the request if it has already been completed. The server will
  // guarantee that for at least 60 minutes since the first request.
  //
  // For example, consider a situation where you make an initial request and
  // the request times out. If you make the request again with the same request
  // ID, the server can check if original operation with the same request ID
  // was received, and if so, will ignore the second request. This prevents
  // clients from accidentally creating duplicate commitments.
  //
  // The request ID must be a valid UUID with the exception that zero UUID is
  // not supported (00000000-0000-0000-0000-000000000000).
  string request_id = 3 [(google.api.field_behavior) = OPTIONAL];
}
// Message for updating stream input to an Application.
message UpdateApplicationStreamInputRequest {
  // Required. The name of the target application.
  // Format:
  // "projects/{project}/locations/{location}/applications/{application}"
  string name = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      type: "visionai.googleapis.com/Application"
    }
  ];

  // The stream inputs to update, the stream resource name is the key of each
  // StreamInput, and it must be unique within each application.
  repeated ApplicationStreamInput application_stream_inputs = 2;

  // Optional. An optional request ID to identify requests. Specify a unique
  // request ID so that if you must retry your request, the server will know to
  // ignore the request if it has already been completed. The server will
  // guarantee that for at least 60 minutes since the first request.
  //
  // For example, consider a situation where you make an initial request and
  // the request times out. If you make the request again with the same request
  // ID, the server can check if original operation with the same request ID
  // was received, and if so, will ignore the second request. This prevents
  // clients from accidentally creating duplicate commitments.
  //
  // The request ID must be a valid UUID with the exception that zero UUID is
  // not supported (00000000-0000-0000-0000-000000000000).
  string request_id = 3 [(google.api.field_behavior) = OPTIONAL];

  // If true, UpdateApplicationStreamInput will insert stream input to
  // application even if the target stream is not included in the application.
  bool allow_missing = 4;
}
// Message for removing stream input from an Application.
message RemoveApplicationStreamInputRequest {
  // Message about target streamInput to remove.
  message TargetStreamInput {
    // The resource name of the stream to remove.
    string stream = 1 [(google.api.resource_reference) = {
      type: "visionai.googleapis.com/Stream"
    }];
  }

  // Required. The name of the target application.
  // Format:
  // "projects/{project}/locations/{location}/applications/{application}"
  string name = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      type: "visionai.googleapis.com/Application"
    }
  ];

  // The target stream to remove.
  repeated TargetStreamInput target_stream_inputs = 2;

  // Optional. An optional request ID to identify requests. Specify a unique
  // request ID so that if you must retry your request, the server will know to
  // ignore the request if it has already been completed. The server will
  // guarantee that for at least 60 minutes since the first request.
  //
  // For example, consider a situation where you make an initial request and
  // the request times out. If you make the request again with the same request
  // ID, the server can check if original operation with the same request ID
  // was received, and if so, will ignore the second request. This prevents
  // clients from accidentally creating duplicate commitments.
  //
  // The request ID must be a valid UUID with the exception that zero UUID is
  // not supported (00000000-0000-0000-0000-000000000000).
  string request_id = 3 [(google.api.field_behavior) = OPTIONAL];
}
// Message for requesting list of Instances.
message ListInstancesRequest {
  // Required. Parent value for ListInstancesRequest.
  string parent = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      child_type: "visionai.googleapis.com/Instance"
    }
  ];

  // Requested page size. Server may return fewer items than requested.
  // If unspecified, server will pick an appropriate default.
  int32 page_size = 2;

  // A token identifying a page of results the server should return.
  string page_token = 3;

  // Filtering results.
  string filter = 4;

  // Hint for how to order the results.
  string order_by = 5;
}
// Message for response to listing Instances.
message ListInstancesResponse {
  // The list of Instance.
  repeated Instance instances = 1;

  // A token that can be sent as `page_token` in a subsequent request to
  // retrieve the next page of results.
  string next_page_token = 2;

  // Locations that could not be reached.
  repeated string unreachable = 3;
}
// Message for getting an Instance.
message GetInstanceRequest {
  // Required. Name of the resource.
  string name = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      type: "visionai.googleapis.com/Instance"
    }
  ];
}
// Message for requesting list of Drafts.
message ListDraftsRequest {
  // Required. Parent value for ListDraftsRequest.
  string parent = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      child_type: "visionai.googleapis.com/Draft"
    }
  ];

  // Requested page size. Server may return fewer items than requested.
  // If unspecified, server will pick an appropriate default.
  int32 page_size = 2;

  // A token identifying a page of results the server should return.
  string page_token = 3;

  // Filtering results.
  string filter = 4;

  // Hint for how to order the results.
  string order_by = 5;
}
// Message for response to listing Drafts.
message ListDraftsResponse {
  // The list of Draft.
  repeated Draft drafts = 1;

  // A token which can be sent as `page_token` in a subsequent
  // ListDraftsRequest to retrieve the next page of results.
  string next_page_token = 2;

  // Locations that could not be reached.
  repeated string unreachable = 3;
}
// Message for getting a Draft.
message GetDraftRequest {
  // Required. Name of the resource.
  // Format:
  // "projects/{project}/locations/{location}/applications/{application}/drafts/{draft}".
  string name = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = { type: "visionai.googleapis.com/Draft" }
  ];
}
// Message for creating a Draft.
message CreateDraftRequest {
  // Required. Value for parent.
  // Per the Draft resource pattern, this is the owning application:
  // "projects/{project}/locations/{location}/applications/{application}".
  string parent = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      child_type: "visionai.googleapis.com/Draft"
    }
  ];

  // Required. Id of the requesting object, i.e. the {draft} segment of the
  // created resource name.
  string draft_id = 2 [(google.api.field_behavior) = REQUIRED];

  // Required. The resource being created.
  Draft draft = 3 [(google.api.field_behavior) = REQUIRED];

  // Optional. An optional request ID to identify requests. Specify a unique
  // request ID so that if you must retry your request, the server will know to
  // ignore the request if it has already been completed. The server will
  // guarantee that for at least 60 minutes since the first request.
  //
  // For example, consider a situation where you make an initial request and
  // the request times out. If you make the request again with the same request
  // ID, the server can check if the original operation with the same request
  // ID was received, and if so, will ignore the second request. This prevents
  // clients from accidentally creating duplicate commitments.
  //
  // The request ID must be a valid UUID with the exception that zero UUID is
  // not supported (00000000-0000-0000-0000-000000000000).
  string request_id = 4 [(google.api.field_behavior) = OPTIONAL];
}
// Message for updating a Draft.
message UpdateDraftRequest {
  // Optional. Field mask is used to specify the fields to be overwritten in the
  // Draft resource by the update.
  // The fields specified in the update_mask are relative to the resource, not
  // the full request. A field will be overwritten if it is in the mask. If the
  // user does not provide a mask then all fields will be overwritten.
  google.protobuf.FieldMask update_mask = 1
      [(google.api.field_behavior) = OPTIONAL];

  // Required. The resource being updated.
  Draft draft = 2 [(google.api.field_behavior) = REQUIRED];

  // Optional. An optional request ID to identify requests. Specify a unique
  // request ID so that if you must retry your request, the server will know to
  // ignore the request if it has already been completed. The server will
  // guarantee that for at least 60 minutes since the first request.
  //
  // For example, consider a situation where you make an initial request and
  // the request times out. If you make the request again with the same request
  // ID, the server can check if the original operation with the same request
  // ID was received, and if so, will ignore the second request. This prevents
  // clients from accidentally creating duplicate commitments.
  //
  // The request ID must be a valid UUID with the exception that zero UUID is
  // not supported (00000000-0000-0000-0000-000000000000).
  string request_id = 3 [(google.api.field_behavior) = OPTIONAL];

  // If true, UpdateDraftRequest will create one resource if the target resource
  // doesn't exist; in that case, the field_mask will be ignored.
  bool allow_missing = 4;
}
// Message for updating an ApplicationInstance.
message UpdateApplicationInstancesRequest {
  // A single per-instance update entry.
  message UpdateApplicationInstance {
    // Optional. Field mask is used to specify the fields to be overwritten in
    // the Draft resource by the update. The fields specified in the update_mask
    // are relative to the resource, not the full request. A field will be
    // overwritten if it is in the mask. If the user does not provide a mask
    // then all fields will be overwritten.
    google.protobuf.FieldMask update_mask = 1
        [(google.api.field_behavior) = OPTIONAL];

    // Required. The resource being updated.
    Instance instance = 2 [(google.api.field_behavior) = REQUIRED];

    // Required. The id of the instance.
    string instance_id = 3 [(google.api.field_behavior) = REQUIRED];
  }

  // Required. The name of the application whose instances are updated.
  // Format:
  // "projects/{project}/locations/{location}/applications/{application}"
  string name = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      type: "visionai.googleapis.com/Application"
    }
  ];

  // The per-instance updates to apply to the application.
  repeated UpdateApplicationInstance application_instances = 2;

  // Optional. An optional request ID to identify requests. Specify a unique
  // request ID so that if you must retry your request, the server will know to
  // ignore the request if it has already been completed. The server will
  // guarantee that for at least 60 minutes since the first request.
  //
  // For example, consider a situation where you make an initial request and
  // the request times out. If you make the request again with the same request
  // ID, the server can check if the original operation with the same request
  // ID was received, and if so, will ignore the second request. This prevents
  // clients from accidentally creating duplicate commitments.
  //
  // The request ID must be a valid UUID with the exception that zero UUID is
  // not supported (00000000-0000-0000-0000-000000000000).
  string request_id = 3 [(google.api.field_behavior) = OPTIONAL];

  // If true, Update Request will create one resource if the target resource
  // doesn't exist; in that case, the field_mask will be ignored.
  bool allow_missing = 4;
}
// Message for deleting a Draft.
message DeleteDraftRequest {
  // Required. Name of the resource.
  // Format:
  // "projects/{project}/locations/{location}/applications/{application}/drafts/{draft}".
  string name = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = { type: "visionai.googleapis.com/Draft" }
  ];

  // Optional. An optional request ID to identify requests. Specify a unique
  // request ID so that if you must retry your request, the server will know to
  // ignore the request if it has already been completed. The server will
  // guarantee that for at least 60 minutes after the first request.
  //
  // For example, consider a situation where you make an initial request and
  // the request times out. If you make the request again with the same request
  // ID, the server can check if the original operation with the same request
  // ID was received, and if so, will ignore the second request. This prevents
  // clients from accidentally creating duplicate commitments.
  //
  // The request ID must be a valid UUID with the exception that zero UUID is
  // not supported (00000000-0000-0000-0000-000000000000).
  string request_id = 2 [(google.api.field_behavior) = OPTIONAL];
}
// Message for requesting list of Processors.
message ListProcessorsRequest {
  // Required. Parent value for ListProcessorsRequest.
  // Per the Processor resource pattern:
  // "projects/{project}/locations/{location}".
  string parent = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      child_type: "visionai.googleapis.com/Processor"
    }
  ];

  // Requested page size. Server may return fewer items than requested.
  // If unspecified, server will pick an appropriate default.
  int32 page_size = 2;

  // A token identifying a page of results the server should return,
  // obtained from a previous ListProcessorsResponse.next_page_token.
  string page_token = 3;

  // Filtering results.
  string filter = 4;

  // Hint for how to order the results.
  string order_by = 5;
}
// Message for response to listing Processors.
message ListProcessorsResponse {
  // The list of Processor.
  repeated Processor processors = 1;

  // A token which can be sent as `page_token` in a subsequent
  // ListProcessorsRequest to retrieve the next page of results.
  string next_page_token = 2;

  // Locations that could not be reached.
  repeated string unreachable = 3;
}
// Request Message for listing Prebuilt Processors.
message ListPrebuiltProcessorsRequest {
  // Required. Parent path.
  // Per the Processor resource pattern:
  // "projects/{project}/locations/{location}".
  string parent = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      child_type: "visionai.googleapis.com/Processor"
    }
  ];
}
// Response Message for listing Prebuilt Processors.
// Note: this list is not paginated.
message ListPrebuiltProcessorsResponse {
  // The list of Processor.
  repeated Processor processors = 1;
}
// Message for getting a Processor.
message GetProcessorRequest {
  // Required. Name of the resource.
  // Format: "projects/{project}/locations/{location}/processors/{processor}".
  string name = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      type: "visionai.googleapis.com/Processor"
    }
  ];
}
// Message for creating a Processor.
message CreateProcessorRequest {
  // Required. Value for parent.
  // Per the Processor resource pattern:
  // "projects/{project}/locations/{location}".
  string parent = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      child_type: "visionai.googleapis.com/Processor"
    }
  ];

  // Required. Id of the requesting object, i.e. the {processor} segment of the
  // created resource name.
  string processor_id = 2 [(google.api.field_behavior) = REQUIRED];

  // Required. The resource being created.
  Processor processor = 3 [(google.api.field_behavior) = REQUIRED];

  // Optional. An optional request ID to identify requests. Specify a unique
  // request ID so that if you must retry your request, the server will know to
  // ignore the request if it has already been completed. The server will
  // guarantee that for at least 60 minutes since the first request.
  //
  // For example, consider a situation where you make an initial request and
  // the request times out. If you make the request again with the same request
  // ID, the server can check if the original operation with the same request
  // ID was received, and if so, will ignore the second request. This prevents
  // clients from accidentally creating duplicate commitments.
  //
  // The request ID must be a valid UUID with the exception that zero UUID is
  // not supported (00000000-0000-0000-0000-000000000000).
  string request_id = 4 [(google.api.field_behavior) = OPTIONAL];
}
// Message for updating a Processor.
message UpdateProcessorRequest {
  // Optional. Field mask is used to specify the fields to be overwritten in the
  // Processor resource by the update.
  // The fields specified in the update_mask are relative to the resource, not
  // the full request. A field will be overwritten if it is in the mask. If the
  // user does not provide a mask then all fields will be overwritten.
  google.protobuf.FieldMask update_mask = 1
      [(google.api.field_behavior) = OPTIONAL];

  // Required. The resource being updated.
  Processor processor = 2 [(google.api.field_behavior) = REQUIRED];

  // Optional. An optional request ID to identify requests. Specify a unique
  // request ID so that if you must retry your request, the server will know to
  // ignore the request if it has already been completed. The server will
  // guarantee that for at least 60 minutes since the first request.
  //
  // For example, consider a situation where you make an initial request and
  // the request times out. If you make the request again with the same request
  // ID, the server can check if the original operation with the same request
  // ID was received, and if so, will ignore the second request. This prevents
  // clients from accidentally creating duplicate commitments.
  //
  // The request ID must be a valid UUID with the exception that zero UUID is
  // not supported (00000000-0000-0000-0000-000000000000).
  string request_id = 3 [(google.api.field_behavior) = OPTIONAL];
}
// Message for deleting a Processor.
message DeleteProcessorRequest {
  // Required. Name of the resource.
  // Format: "projects/{project}/locations/{location}/processors/{processor}".
  string name = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      type: "visionai.googleapis.com/Processor"
    }
  ];

  // Optional. An optional request ID to identify requests. Specify a unique
  // request ID so that if you must retry your request, the server will know to
  // ignore the request if it has already been completed. The server will
  // guarantee that for at least 60 minutes after the first request.
  //
  // For example, consider a situation where you make an initial request and
  // the request times out. If you make the request again with the same request
  // ID, the server can check if the original operation with the same request
  // ID was received, and if so, will ignore the second request. This prevents
  // clients from accidentally creating duplicate commitments.
  //
  // The request ID must be a valid UUID with the exception that zero UUID is
  // not supported (00000000-0000-0000-0000-000000000000).
  string request_id = 2 [(google.api.field_behavior) = OPTIONAL];
}
// Message describing Application object
message Application {
  option (google.api.resource) = {
    type: "visionai.googleapis.com/Application"
    pattern: "projects/{project}/locations/{location}/applications/{application}"
    style: DECLARATIVE_FRIENDLY
  };

  // Message storing the runtime information of the application.
  message ApplicationRuntimeInfo {
    // Message about output resources from application.
    message GlobalOutputResource {
      // The full resource name of the outputted resources.
      string output_resource = 1;

      // The name of graph node who produces the output resource name.
      // For example:
      // output_resource:
      // /projects/123/locations/us-central1/corpora/my-corpus/dataSchemas/my-schema
      // producer_node: occupancy-count
      string producer_node = 2;

      // The key of the output resource, it has to be unique within the same
      // producer node. One producer node can output several output resources,
      // the key can be used to match corresponding output resources.
      string key = 3;
    }

    // Monitoring-related configuration for an application.
    message MonitoringConfig {
      // Whether this application has monitoring enabled.
      bool enabled = 1;
    }

    // Timestamp when the engine was deployed.
    google.protobuf.Timestamp deploy_time = 1;

    // Globally created resources like warehouse dataschemas.
    // (Field number 2 is skipped in the published schema.)
    repeated GlobalOutputResource global_output_resources = 3;

    // Monitoring-related configuration for this application.
    MonitoringConfig monitoring_config = 4;
  }

  // State of the Application
  enum State {
    // The default value. This value is used if the state is omitted.
    STATE_UNSPECIFIED = 0;

    // State CREATED.
    CREATED = 1;

    // State DEPLOYING.
    DEPLOYING = 2;

    // State DEPLOYED.
    DEPLOYED = 3;

    // State UNDEPLOYING.
    UNDEPLOYING = 4;

    // State DELETED.
    DELETED = 5;

    // State ERROR.
    ERROR = 6;

    // State CREATING.
    CREATING = 7;

    // State UPDATING.
    UPDATING = 8;

    // State DELETING.
    DELETING = 9;

    // State FIXING.
    FIXING = 10;
  }

  // Billing mode of the Application
  enum BillingMode {
    // The default value.
    BILLING_MODE_UNSPECIFIED = 0;

    // Pay as you go billing mode.
    PAYG = 1;

    // Monthly billing mode.
    MONTHLY = 2;
  }

  // name of resource
  string name = 1;

  // Output only. [Output only] Create timestamp
  google.protobuf.Timestamp create_time = 2
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. [Output only] Update timestamp
  google.protobuf.Timestamp update_time = 3
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Labels as key value pairs
  // (type arguments restored; the extraction dropped "<string, string>")
  map<string, string> labels = 4;

  // Required. A user friendly display name for the solution.
  string display_name = 5 [(google.api.field_behavior) = REQUIRED];

  // A description for this application.
  string description = 6;

  // Application graph configuration.
  ApplicationConfigs application_configs = 7;

  // Output only. Application graph runtime info. Only exists when application
  // state equals to DEPLOYED.
  ApplicationRuntimeInfo runtime_info = 8
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. State of the application.
  State state = 9 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Billing mode of the application.
  BillingMode billing_mode = 12;
}
// Message storing the graph of the application.
message ApplicationConfigs {
  // message storing the config for event delivery
  message EventDeliveryConfig {
    // The delivery channel for the event notification, only pub/sub topic is
    // supported now.
    // Example channel:
    // [//pubsub.googleapis.com/projects/visionai-testing-stable/topics/test-topic]
    string channel = 1;

    // The expected delivery interval for the same event. The same event won't
    // be notified multiple times during this internal event that it is
    // happening multiple times during the period of time. The same event is
    // identified by its event id.
    // NOTE(review): the original comment ended "identified by ." — angle-
    // bracketed text was apparently lost in extraction; confirm the exact
    // identifier tuple against the upstream proto.
    google.protobuf.Duration minimal_delivery_interval = 2;
  }

  // A list of nodes in the application graph.
  repeated Node nodes = 1;

  // Event-related configuration for this application.
  // (Field number 2 is skipped in the published schema.)
  EventDeliveryConfig event_delivery_config = 3;
}
// Message describing node object.
message Node {
  // Message describing one edge pointing into a node.
  message InputEdge {
    // The name of the parent node.
    string parent_node = 1;

    // The connected output artifact of the parent node.
    // It can be omitted if target processor only has 1 output artifact.
    string parent_output_channel = 2;

    // The connected input channel of the current node's processor.
    // It can be omitted if target processor only has 1 input channel.
    string connected_input_channel = 3;
  }

  // Optional stream-output behavior for this node; currently a single member.
  oneof stream_output_config {
    // By default, the output of the node will only be available to downstream
    // nodes. To consume the direct output from the application node, the output
    // must be sent to Vision AI Streams at first.
    //
    // By setting output_all_output_channels_to_stream to true, App Platform
    // will automatically send all the outputs of the current node to Vision AI
    // Stream resources (one stream per output channel). The output stream
    // resource will be created by App Platform automatically during deployment
    // and deleted after application un-deployment.
    // Note that this config applies to all the Application Instances.
    //
    // The output stream can be overridden at instance level by
    // configuring the `output_resources` section of Instance resource.
    // `producer_node` should be current node, `output_resource_binding` should
    // be the output channel name (or leave it blank if there is only 1 output
    // channel of the processor) and `output_resource` should be the target
    // output stream.
    bool output_all_output_channels_to_stream = 6;
  }

  // Required. A unique name for the node.
  string name = 1 [(google.api.field_behavior) = REQUIRED];

  // A user friendly display name for the node.
  string display_name = 2;

  // Node config.
  ProcessorConfig node_config = 3;

  // Processor name refer to the chosen processor resource.
  string processor = 4;

  // Parent node. Input node should not have parent node. For V1 Alpha1/Beta
  // only media warehouse node can have multiple parents, other types of nodes
  // will only have one parent.
  repeated InputEdge parents = 5;
}
// Message describing Draft object
message Draft {
option (google.api.resource) = {
type: "visionai.googleapis.com/Draft"
pattern: "projects/{project}/locations/{location}/applications/{application}/drafts/{draft}"
style: DECLARATIVE_FRIENDLY
};
// name of resource
string name = 1;
// Output only. [Output only] Create timestamp
google.protobuf.Timestamp create_time = 2
[(google.api.field_behavior) = OUTPUT_ONLY];
// Output only. [Output only] Create timestamp
google.protobuf.Timestamp update_time = 7
[(google.api.field_behavior) = OUTPUT_ONLY];
// Labels as key value pairs
map labels = 3;
// Required. A user friendly display name for the solution.
string display_name = 4 [(google.api.field_behavior) = REQUIRED];
// A description for this application.
string description = 5;
// The draft application configs which haven't been updated to an application.
ApplicationConfigs draft_application_configs = 6;
}
// Message describing Instance object
// Next ID: 12
message Instance {
  option (google.api.resource) = {
    type: "visionai.googleapis.com/Instance"
    pattern: "projects/{project}/locations/{location}/applications/{application}/instances/{instance}"
    style: DECLARATIVE_FRIENDLY
  };

  // Message of input resource used in one application instance.
  message InputResource {
    // Required. Specify the input to the application instance.
    oneof input_resource_information {
      // The direct input resource name.
      // If the instance type is STREAMING_PREDICTION, the input resource is in
      // format of
      // "projects/123/locations/us-central1/clusters/456/streams/stream-a".
      // If the instance type is BATCH_PREDICTION from Cloud Storage input
      // container, the input resource is in format of "gs://bucket-a".
      string input_resource = 1;

      // If the input resource is VisionAI Stream, the associated annotations
      // can be specified using annotated_stream instead.
      StreamWithAnnotation annotated_stream = 4 [deprecated = true];
    }

    // Data type for the current input resource.
    DataType data_type = 6;

    // The name of graph node who receives the input resource.
    // For example:
    // input_resource:
    // visionai.googleapis.com/v1/projects/123/locations/us-central1/clusters/456/streams/input-stream-a
    // consumer_node: stream-input
    string consumer_node = 2;

    // The specific input resource binding which will consume the current Input
    // Resource; can be ignored if there is only 1 input binding.
    string input_resource_binding = 3;

    // Contains resource annotations.
    ResourceAnnotations annotations = 5;
  }

  // Message of output resource used in one application instance.
  message OutputResource {
    // The output resource name for the current application instance.
    string output_resource = 1;

    // The name of graph node who produces the output resource name.
    // For example:
    // output_resource:
    // /projects/123/locations/us-central1/clusters/456/streams/output-application-789-stream-a-occupancy-counting
    // producer_node: occupancy-counting
    string producer_node = 2;

    // The specific output resource binding which produces the current
    // OutputResource.
    string output_resource_binding = 4;

    // Output only. Whether the output resource is temporary which means the
    // resource is generated during the deployment of the application. Temporary
    // resource will be deleted during the undeployment of the application.
    bool is_temporary = 3 [(google.api.field_behavior) = OUTPUT_ONLY];

    // Output only. Whether the output resource is created automatically by the
    // Vision AI App Platform.
    bool autogen = 5 [(google.api.field_behavior) = OUTPUT_ONLY];
  }

  // All the supported instance types.
  enum InstanceType {
    // Unspecified instance type.
    // If the instance type is not specified, the default one is
    // STREAMING_PREDICTION.
    INSTANCE_TYPE_UNSPECIFIED = 0;

    // Instance type for streaming prediction.
    STREAMING_PREDICTION = 1;

    // Instance type for batch prediction.
    BATCH_PREDICTION = 2;

    // Instance type for online prediction.
    ONLINE_PREDICTION = 3;
  }

  // State of the Instance
  enum State {
    // The default value. This value is used if the state is omitted.
    STATE_UNSPECIFIED = 0;

    // State CREATING.
    CREATING = 1;

    // State CREATED.
    CREATED = 2;

    // State DEPLOYING.
    DEPLOYING = 3;

    // State DEPLOYED.
    DEPLOYED = 4;

    // State UNDEPLOYING.
    UNDEPLOYING = 5;

    // State DELETED.
    DELETED = 6;

    // State ERROR.
    ERROR = 7;

    // State UPDATING.
    UPDATING = 8;

    // State DELETING.
    DELETING = 9;

    // State FIXING.
    FIXING = 10;

    // State FINISHED.
    FINISHED = 11;
  }

  // Output only. name of resource
  string name = 1 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. [Output only] Create timestamp
  google.protobuf.Timestamp create_time = 2
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. [Output only] Update timestamp
  google.protobuf.Timestamp update_time = 8
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Labels as key value pairs
  // (type arguments restored; the extraction dropped "<string, string>")
  map<string, string> labels = 3;

  // Required. A user friendly display name for the solution.
  string display_name = 4 [(google.api.field_behavior) = REQUIRED];

  // A description for this instance.
  string description = 5;

  // The instance type for the current instance.
  InstanceType instance_type = 10;

  // The input resources for the current application instance.
  // For example:
  // input_resources:
  // visionai.googleapis.com/v1/projects/123/locations/us-central1/clusters/456/streams/stream-a
  repeated InputResource input_resources = 6;

  // All the output resources associated to one application instance.
  repeated OutputResource output_resources = 7;

  // State of the instance.
  State state = 9;
}
// Message for creating a Instance.
message ApplicationInstance {
  // Required. Id of the requesting object, i.e. the {instance} segment of the
  // created resource name.
  string instance_id = 1 [(google.api.field_behavior) = REQUIRED];

  // Required. The resource being created.
  Instance instance = 2 [(google.api.field_behavior) = REQUIRED];
}
// Message describing Processor object.
// Next ID: 19
message Processor {
  option (google.api.resource) = {
    type: "visionai.googleapis.com/Processor"
    pattern: "projects/{project}/locations/{location}/processors/{processor}"
    style: DECLARATIVE_FRIENDLY
  };

  // Type
  enum ProcessorType {
    // Processor Type UNSPECIFIED.
    PROCESSOR_TYPE_UNSPECIFIED = 0;

    // Processor Type PRETRAINED.
    // Pretrained processor is developed by Vision AI App Platform with
    // state-of-the-art vision data processing functionality, like occupancy
    // counting or person blur. Pretrained processor is usually publicly
    // available.
    PRETRAINED = 1;

    // Processor Type CUSTOM.
    // Custom processors are specialized processors which are either uploaded by
    // customers or imported from other GCP platform (for example Vertex AI).
    // Custom processor is only visible to the creator.
    CUSTOM = 2;

    // Processor Type CONNECTOR.
    // Connector processors are special processors which perform I/O for the
    // application; they do not process the data but either deliver the data
    // to other processors or receive data from other processors.
    CONNECTOR = 3;
  }

  // Lifecycle state of the Processor.
  enum ProcessorState {
    // Unspecified Processor state.
    PROCESSOR_STATE_UNSPECIFIED = 0;

    // Processor is being created (not ready for use).
    CREATING = 1;

    // Processor is active and ready for use.
    ACTIVE = 2;

    // Processor is being deleted (not ready for use).
    DELETING = 3;

    // Processor deleted or creation failed.
    FAILED = 4;
  }

  // name of resource.
  string name = 1;

  // Output only. [Output only] Create timestamp.
  google.protobuf.Timestamp create_time = 2
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. [Output only] Update timestamp.
  google.protobuf.Timestamp update_time = 3
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Labels as key value pairs.
  // (type arguments restored; the extraction dropped "<string, string>")
  map<string, string> labels = 4;

  // Required. A user friendly display name for the processor.
  string display_name = 5 [(google.api.field_behavior) = REQUIRED];

  // Illustrative sentences for describing the functionality of the processor.
  string description = 10;

  // Output only. Processor Type.
  ProcessorType processor_type = 6 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Model Type.
  ModelType model_type = 13;

  // Source info for customer created processor.
  CustomProcessorSourceInfo custom_processor_source_info = 7;

  // Output only. State of the Processor.
  ProcessorState state = 8 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. [Output only] The input / output specifications of a
  // processor, each type of processor has fixed input / output specs which
  // cannot be altered by customer.
  ProcessorIOSpec processor_io_spec = 11
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. The corresponding configuration can be used in the Application
  // to customize the behavior of the processor.
  string configuration_typeurl = 14 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. The stream annotation types this processor supports.
  repeated StreamAnnotationType supported_annotation_types = 15
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Indicates if the processor supports post processing.
  bool supports_post_processing = 17;

  // Which instance types this processor supports; if empty, this default to
  // STREAMING_PREDICTION.
  repeated Instance.InstanceType supported_instance_types = 18;
}
// Message describing the input / output specifications of a processor.
message ProcessorIOSpec {
  // Message for input channel specification.
  message GraphInputChannelSpec {
    // The name of the current input channel.
    string name = 1;

    // The data types of the current input channel.
    // When this field has more than 1 value, it means this input channel can be
    // connected to either of these different data types.
    DataType data_type = 2;

    // If specified, only those detailed data types can be connected to the
    // processor. For example, jpeg stream for MEDIA, or PredictionResult proto
    // for PROTO type. If unspecified, then any proto is accepted.
    repeated string accepted_data_type_uris = 5;

    // Whether the current input channel is required by the processor.
    // For example, for a processor with required video input and optional audio
    // input, if video input is missing, the application will be rejected while
    // the audio input can be missing as long as the video input exists.
    bool required = 3;

    // How many input edges can be connected to this input channel. 0 means
    // unlimited.
    int64 max_connection_allowed = 4;
  }

  // Message for output channel specification.
  message GraphOutputChannelSpec {
    // The name of the current output channel.
    string name = 1;

    // The data type of the current output channel.
    DataType data_type = 2;

    // The detailed data type URI of the current output channel.
    // NOTE(review): undocumented upstream — inferred from the parallel
    // accepted_data_type_uris field; confirm semantics.
    string data_type_uri = 3;
  }

  // Message for instance resource channel specification.
  // External resources are virtual nodes which are not expressed in the
  // application graph. Each processor expresses its out-graph spec, so customer
  // is able to override the external source or destinations to the
  // processor. (NOTE(review): this sentence is truncated in the published
  // comment; completion inferred — confirm against upstream proto.)
  message InstanceResourceInputBindingSpec {
    // How the acceptable resource type is declared; exactly one form is set.
    oneof resource_type {
      // The configuration proto that includes the Googleapis resources. I.e.
      // type.googleapis.com/google.cloud.vision.v1.StreamWithAnnotation
      string config_type_uri = 2;

      // The direct type url of Googleapis resource. i.e.
      // type.googleapis.com/google.cloud.vision.v1.Asset
      string resource_type_uri = 3;
    }

    // Name of the input binding, unique within the processor.
    string name = 1;
  }

  // Message for instance resource output binding specification.
  message InstanceResourceOutputBindingSpec {
    // Name of the output binding, unique within the processor.
    string name = 1;

    // The resource type uri of the acceptable output resource.
    string resource_type_uri = 2;

    // Whether the output resource needs to be explicitly set in the instance.
    // If it is false, the processor will automatically generate it if required.
    bool explicit = 3;
  }

  // For processors with input_channel_specs, the processor must be explicitly
  // connected to another processor.
  repeated GraphInputChannelSpec graph_input_channel_specs = 3;

  // The output artifact specifications for the current processor.
  repeated GraphOutputChannelSpec graph_output_channel_specs = 4;

  // The input resource that needs to be fed from the application instance.
  repeated InstanceResourceInputBindingSpec
      instance_resource_input_binding_specs = 5;

  // The output resource that the processor will generate per instance.
  // Other than the explicitly listed output bindings here, all the processors'
  // GraphOutputChannels can be binded to stream resource. The bind name then is
  // the same as the GraphOutputChannel's name.
  repeated InstanceResourceOutputBindingSpec
      instance_resource_output_binding_specs = 6;
}
// Describes the source info for a custom processor.
message CustomProcessorSourceInfo {
  // Message describes product recognizer artifact.
  message ProductRecognizerArtifact {
    // Required. Resource name of RetailProductRecognitionIndex.
    // Format is
    // 'projects/*/locations/*/retailCatalogs/*/retailProductRecognitionIndexes/*'
    string retail_product_recognition_index = 1
        [(google.api.field_behavior) = REQUIRED];

    // Optional. The resource name of embedding model hosted in Vertex AI
    // Platform.
    string vertex_model = 2 [(google.api.field_behavior) = OPTIONAL];
  }

  // The schema is defined as an OpenAPI 3.0.2 [Schema
  // Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject).
  message ModelSchema {
    // Cloud Storage location to a YAML file that defines the format of a single
    // instance used in prediction and explanation requests.
    GcsSource instances_schema = 1;

    // Cloud Storage location to a YAML file that defines the prediction and
    // explanation parameters.
    GcsSource parameters_schema = 2;

    // Cloud Storage location to a YAML file that defines the format of a single
    // prediction or explanation.
    GcsSource predictions_schema = 3;
  }

  // Source type of the imported custom processor.
  enum SourceType {
    // Source type unspecified.
    SOURCE_TYPE_UNSPECIFIED = 0;

    // Custom processors coming from Vertex AutoML product.
    VERTEX_AUTOML = 1;

    // Custom processors coming from general custom models from Vertex.
    VERTEX_CUSTOM = 2;

    // Source for Product Recognizer.
    PRODUCT_RECOGNIZER = 3;
  }

  // The path where App Platform loads the artifacts for the custom processor.
  oneof artifact_path {
    // The resource name original model hosted in the vertex AI platform.
    string vertex_model = 2;

    // Artifact for product recognizer.
    ProductRecognizerArtifact product_recognizer_artifact = 3;
  }

  // The original product which holds the custom processor's functionality.
  SourceType source_type = 1;

  // Output only. Additional info related to the imported custom processor.
  // Data is filled in by app platform during the processor creation.
  // (type arguments restored; the extraction dropped "<string, string>")
  map<string, string> additional_info = 4
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Model schema files which specifies the signature of the model.
  // For VERTEX_CUSTOM models, instances schema is required.
  // If instances schema is not specified during the processor creation,
  // VisionAI Platform will try to get it from Vertex, if it doesn't exist, the
  // creation will fail.
  ModelSchema model_schema = 5;
}
// Next ID: 35
// Per-node processor configuration. At most one of the processor-specific
// configs in the oneof below is set, matching the processor type the config
// is attached to.
message ProcessorConfig {
  oneof processor_config {
    // Configs of stream input processor.
    VideoStreamInputConfig video_stream_input_config = 9;

    // Config of AI-enabled input devices.
    AIEnabledDevicesInputConfig ai_enabled_devices_input_config = 20;

    // Configs of media warehouse processor.
    MediaWarehouseConfig media_warehouse_config = 10;

    // Configs of person blur processor.
    PersonBlurConfig person_blur_config = 11;

    // Configs of occupancy count processor.
    OccupancyCountConfig occupancy_count_config = 12;

    // Configs of Person Vehicle Detection processor.
    PersonVehicleDetectionConfig person_vehicle_detection_config = 15;

    // Configs of Vertex AutoML vision processor.
    VertexAutoMLVisionConfig vertex_automl_vision_config = 13;

    // Configs of Vertex AutoML video processor.
    VertexAutoMLVideoConfig vertex_automl_video_config = 14;

    // Configs of Vertex Custom processor.
    VertexCustomConfig vertex_custom_config = 17;

    // Configs of General Object Detection processor.
    GeneralObjectDetectionConfig general_object_detection_config = 18;

    // Configs of BigQuery processor.
    BigQueryConfig big_query_config = 19;

    // Configs of Cloud Storage output processor.
    GcsOutputConfig gcs_output_config = 27;

    // Runtime configs of Product Recognizer processor.
    ProductRecognizerConfig product_recognizer_config = 21;

    // Configs of the personal protective equipment detection processor.
    PersonalProtectiveEquipmentDetectionConfig
        personal_protective_equipment_detection_config = 22;

    // Runtime configs of Tag Recognizer processor.
    TagRecognizerConfig tag_recognizer_config = 25;

    // Runtime configs of UniversalInput processor.
    UniversalInputConfig universal_input_config = 28;
  }

  // Experimental configurations. Structured object containing not-yet-stable
  // processor parameters.
  google.protobuf.Struct experimental_config = 26;
}
// Message describing Vision AI stream with application specific annotations.
// All the StreamAnnotation object inside this message MUST have unique id.
message StreamWithAnnotation {
  // Message describing annotations specific to application node.
  message NodeAnnotation {
    // The node name of the application graph.
    string node = 1;

    // The node specific stream annotations.
    repeated StreamAnnotation annotations = 2;
  }

  // Vision AI Stream resource name.
  string stream = 1 [
    (google.api.resource_reference) = { type: "visionai.googleapis.com/Stream" }
  ];

  // Annotations that will be applied to the whole application.
  repeated StreamAnnotation application_annotations = 2;

  // Annotations that will be applied to the specific node of the application.
  // If the same type of the annotations is applied to both application and
  // node, the node annotation will be added in addition to the global
  // application one.
  // For example, if there is one active zone annotation for the whole
  // application and one active zone annotation for the Occupancy Analytic
  // processor, then the Occupancy Analytic processor will have two active zones
  // defined.
  repeated NodeAnnotation node_annotations = 3;
}
// Message describing annotations specific to application node.
// This message is a duplication of StreamWithAnnotation.NodeAnnotation,
// kept separate so it can be referenced outside of StreamWithAnnotation
// (e.g. by ResourceAnnotations).
message ApplicationNodeAnnotation {
  // The node name of the application graph.
  string node = 1;

  // The node specific stream annotations.
  repeated StreamAnnotation annotations = 2;
}
// Message describing general annotation for resources.
message ResourceAnnotations {
  // Annotations that will be applied to the whole application.
  repeated StreamAnnotation application_annotations = 1;

  // Annotations that will be applied to the specific node of the application.
  // If the same type of the annotations is applied to both application and
  // node, the node annotation will be added in addition to the global
  // application one.
  // For example, if there is one active zone annotation for the whole
  // application and one active zone annotation for the Occupancy Analytic
  // processor, then the Occupancy Analytic processor will have two active zones
  // defined.
  repeated ApplicationNodeAnnotation node_annotations = 2;
}
// Message describing Video Stream Input Config.
// This message should only be used as a placeholder for builtin:stream-input
// processor, actual stream binding should be specified using corresponding
// API.
message VideoStreamInputConfig {
  // Deprecated: use the stream binding API instead of listing stream
  // resource names here.
  repeated string streams = 1 [deprecated = true];

  // Deprecated: use the stream binding API instead of inline
  // stream-with-annotation bindings.
  repeated StreamWithAnnotation streams_with_annotation = 2 [deprecated = true];
}
// Message describing AI-enabled Devices Input Config.
// Intentionally empty: this processor currently takes no configuration.
message AIEnabledDevicesInputConfig {}
// Message describing MediaWarehouseConfig.
message MediaWarehouseConfig {
  // Resource name of the Media Warehouse corpus.
  // Format:
  // projects/${project_id}/locations/${location_id}/corpora/${corpus_id}
  string corpus = 1;

  // Deprecated: no longer used; the region is derived from the corpus
  // resource name.
  string region = 2 [deprecated = true];

  // The duration for which all media assets, associated metadata, and search
  // documents can exist.
  google.protobuf.Duration ttl = 3;
}
// Message describing FaceBlurConfig.
message PersonBlurConfig {
  // Type of Person Blur
  enum PersonBlurType {
    // PersonBlur Type UNSPECIFIED.
    PERSON_BLUR_TYPE_UNSPECIFIED = 0;

    // FaceBlur Type full occlusion.
    // NOTE: value name contains a historical misspelling ("OCCULUSION");
    // renaming it now would break generated code and JSON clients.
    FULL_OCCULUSION = 1;

    // FaceBlur Type blur filter.
    BLUR_FILTER = 2;
  }

  // Person blur type.
  PersonBlurType person_blur_type = 1;

  // Whether to blur faces only, rather than the whole detected object.
  bool faces_only = 2;
}
// Message describing OccupancyCountConfig.
message OccupancyCountConfig {
  // Whether to count the appearances of people, output counts have 'people' as
  // the key.
  bool enable_people_counting = 1;

  // Whether to count the appearances of vehicles, output counts will have
  // 'vehicle' as the key.
  bool enable_vehicle_counting = 2;

  // Whether to track each individual object's loitering time inside the scene
  // or specific zone.
  bool enable_dwelling_time_tracking = 3;
}
// Message describing PersonVehicleDetectionConfig.
message PersonVehicleDetectionConfig {
  // At least one of enable_people_counting and enable_vehicle_counting fields
  // must be set to true.
  // Whether to count the appearances of people, output counts have 'people' as
  // the key.
  bool enable_people_counting = 1;

  // Whether to count the appearances of vehicles, output counts will have
  // 'vehicle' as the key.
  bool enable_vehicle_counting = 2;
}
// Message describing PersonalProtectiveEquipmentDetectionConfig.
message PersonalProtectiveEquipmentDetectionConfig {
  // Whether to enable face coverage detection.
  bool enable_face_coverage_detection = 1;

  // Whether to enable head coverage detection.
  bool enable_head_coverage_detection = 2;

  // Whether to enable hands coverage detection.
  bool enable_hands_coverage_detection = 3;
}
// Message of configurations for General Object Detection processor.
// Intentionally empty: this processor currently takes no configuration.
message GeneralObjectDetectionConfig {}
// Message of configurations for BigQuery processor.
message BigQueryConfig {
  // BigQuery table resource for Vision AI Platform to ingest annotations to.
  string table = 1;

  // Data Schema
  // By default, Vision AI Application will try to write annotations to the
  // target BigQuery table using the following schema:
  //
  // ingestion_time: TIMESTAMP, the ingestion time of the original data.
  //
  // application: STRING, name of the application which produces the annotation.
  //
  // instance: STRING, Id of the instance which produces the annotation.
  //
  // node: STRING, name of the application graph node which produces the
  // annotation.
  //
  // annotation: STRING or JSON, the actual annotation protobuf will be
  // converted to json string with bytes field as 64 encoded string. It can be
  // written to both String or Json type column.
  //
  // To forward annotation data to an existing BigQuery table, customer needs to
  // make sure the compatibility of the schema.
  // The map maps application node name to its corresponding cloud function
  // endpoint to transform the annotations directly to the
  // google.cloud.bigquery.storage.v1.AppendRowsRequest (only avro_rows or
  // proto_rows should be set). If configured, annotations produced by
  // corresponding application node will sent to the Cloud Function at first
  // before be forwarded to BigQuery.
  //
  // If the default table schema doesn't fit, customer is able to transform the
  // annotation output from Vision AI Application to arbitrary BigQuery table
  // schema with CloudFunction.
  // * The cloud function will receive AppPlatformCloudFunctionRequest where
  //   the annotations field will be the json format of Vision AI annotation.
  // * The cloud function should return AppPlatformCloudFunctionResponse with
  //   AppendRowsRequest stored in the annotations field.
  // * To drop the annotation, simply clear the annotations field in the
  //   returned AppPlatformCloudFunctionResponse.
  map<string, string> cloud_function_mapping = 2;

  // If true, App Platform will create the BigQuery DataSet and the
  // BigQuery Table with default schema if the specified table doesn't exist.
  // This doesn't work if any cloud function customized schema is specified
  // since the system doesn't know your desired schema.
  // JSON column will be used in the default table created by App Platform.
  bool create_default_table_if_not_exists = 3;
}
// Message of configurations of Vertex AutoML Vision Processors.
message VertexAutoMLVisionConfig {
  // Only entities with higher score than the threshold will be returned.
  // Value 0.0 means to return all the detected entities.
  float confidence_threshold = 1;

  // At most this many predictions will be returned per output frame.
  // Value 0 means to return all the detected entities.
  int32 max_predictions = 2;
}
// Message describing VertexAutoMLVideoConfig.
message VertexAutoMLVideoConfig {
  // Only entities with higher score than the threshold will be returned.
  // Value 0.0 means returns all the detected entities.
  float confidence_threshold = 1;

  // Labels specified in this field won't be returned.
  repeated string blocked_labels = 2;

  // At most this many predictions will be returned per output frame.
  // Value 0 means to return all the detected entities.
  int32 max_predictions = 3;

  // Only Bounding Box whose size is larger than this limit will be returned.
  // Object Tracking only.
  // Value 0.0 means to return all the detected entities.
  float bounding_box_size_limit = 4;
}
// Message describing VertexCustomConfig.
message VertexCustomConfig {
  // The max prediction frame per second. This attribute sets how fast the
  // operator sends prediction requests to Vertex AI endpoint. Default value is
  // 0, which means there is no max prediction fps limit. The operator sends
  // prediction requests at input fps.
  int32 max_prediction_fps = 1;

  // A description of resources that are dedicated to the DeployedModel, and
  // that need a higher degree of manual configuration.
  DedicatedResources dedicated_resources = 2;

  // If not empty, the prediction result will be sent to the specified cloud
  // function for post processing.
  // * The cloud function will receive AppPlatformCloudFunctionRequest where
  //   the annotations field will be the json format of proto PredictResponse.
  // * The cloud function should return AppPlatformCloudFunctionResponse with
  //   PredictResponse stored in the annotations field.
  // * To drop the prediction output, simply clear the payload field in the
  //   returned AppPlatformCloudFunctionResponse.
  string post_processing_cloud_function = 3;

  // If true, the prediction request received by custom model will also contain
  // metadata with the following schema:
  // 'appPlatformMetadata': {
  //   'ingestionTime': DOUBLE; (UNIX timestamp)
  //   'application': STRING;
  //   'instanceId': STRING;
  //   'node': STRING;
  //   'processor': STRING;
  // }
  bool attach_application_metadata = 4;

  // Optional. By setting the dynamic_config_input_topic, processor will
  // subscribe to given topic, only pub/sub topic is supported now. Example
  // channel:
  // //pubsub.googleapis.com/projects/visionai-testing-stable/topics/test-topic
  // message schema should be:
  // message Message {
  //   // The ID of the stream that associates with the application instance.
  //   string stream_id = 1;
  //   // The target fps. By default, the custom processor will *not* send any
  //   // data to the Vertex Prediction container. Note that once the
  //   // dynamic_config_input_topic is set, max_prediction_fps will not work
  //   // and be preceded by the fps set inside the topic.
  //   int32 fps = 2;
  // }
  optional string dynamic_config_input_topic = 6
      [(google.api.field_behavior) = OPTIONAL];
}
// Message describing GcsOutputConfig.
message GcsOutputConfig {
  // The Cloud Storage path for Vision AI Platform to ingest annotations to.
  string gcs_path = 1;
}
// Message describing UniversalInputConfig.
// Intentionally empty: this processor currently takes no configuration.
message UniversalInputConfig {}
// Specification of a single machine.
message MachineSpec {
  // Immutable. The type of the machine.
  //
  // See the [list of machine types supported for
  // prediction](https://cloud.google.com/vertex-ai/docs/predictions/configure-compute#machine-types)
  //
  // See the [list of machine types supported for custom
  // training](https://cloud.google.com/vertex-ai/docs/training/configure-compute#machine-types).
  //
  // For [DeployedModel][] this field is optional, and the default
  // value is `n1-standard-2`. For [BatchPredictionJob][] or as part of
  // [WorkerPoolSpec][] this field is required.
  string machine_type = 1 [(google.api.field_behavior) = IMMUTABLE];

  // Immutable. The type of accelerator(s) that may be attached to the machine
  // as per
  // [accelerator_count][google.cloud.visionai.v1.MachineSpec.accelerator_count].
  AcceleratorType accelerator_type = 2
      [(google.api.field_behavior) = IMMUTABLE];

  // The number of accelerators to attach to the machine.
  int32 accelerator_count = 3;
}
// The metric specification that defines the target resource utilization
// (CPU utilization, accelerator's duty cycle, and so on) for calculating the
// desired replica count.
message AutoscalingMetricSpec {
  // Required. The resource metric name.
  // Supported metrics:
  //
  // * For Online Prediction:
  // * `aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle`
  // * `aiplatform.googleapis.com/prediction/online/cpu/utilization`
  string metric_name = 1 [(google.api.field_behavior) = REQUIRED];

  // The target resource utilization in percentage (1% - 100%) for the given
  // metric; once the real usage deviates from the target by a certain
  // percentage, the machine replicas change. The default value is 60
  // (representing 60%) if not provided.
  int32 target = 2;
}
// A description of resources that are dedicated to a DeployedModel, and
// that need a higher degree of manual configuration.
message DedicatedResources {
  // Required. Immutable. The specification of a single machine used by the
  // prediction.
  MachineSpec machine_spec = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.field_behavior) = IMMUTABLE
  ];

  // Required. Immutable. The minimum number of machine replicas this
  // DeployedModel will be always deployed on. This value must be greater than
  // or equal to 1.
  //
  // If traffic against the DeployedModel increases, it may dynamically be
  // deployed onto more replicas, and as traffic decreases, some of these extra
  // replicas may be freed.
  int32 min_replica_count = 2 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.field_behavior) = IMMUTABLE
  ];

  // Immutable. The maximum number of replicas this DeployedModel may be
  // deployed on when the traffic against it increases. If the requested value
  // is too large, the deployment will error, but if deployment succeeds then
  // the ability to scale the model to that many replicas is guaranteed (barring
  // service outages). If traffic against the DeployedModel increases beyond
  // what its replicas at maximum may handle, a portion of the traffic will be
  // dropped. If this value is not provided, will use
  // [min_replica_count][google.cloud.visionai.v1.DedicatedResources.min_replica_count]
  // as the default value.
  //
  // The value of this field impacts the charge against Vertex CPU and GPU
  // quotas. Specifically, you will be charged for (max_replica_count *
  // number of cores in the selected machine type) and (max_replica_count *
  // number of GPUs per replica in the selected machine type).
  int32 max_replica_count = 3 [(google.api.field_behavior) = IMMUTABLE];

  // Immutable. The metric specifications that overrides a resource
  // utilization metric (CPU utilization, accelerator's duty cycle, and so on)
  // target value (default to 60 if not set). At most one entry is allowed per
  // metric.
  //
  // If
  // [machine_spec.accelerator_count][google.cloud.visionai.v1.MachineSpec.accelerator_count]
  // is above 0, the autoscaling will be based on both CPU utilization and
  // accelerator's duty cycle metrics and scale up when either metrics exceeds
  // its target value while scale down if both metrics are under their target
  // value. The default target value is 60 for both metrics.
  //
  // If
  // [machine_spec.accelerator_count][google.cloud.visionai.v1.MachineSpec.accelerator_count]
  // is 0, the autoscaling will be based on CPU utilization metric only with
  // default target value 60 if not explicitly set.
  //
  // For example, in the case of Online Prediction, if you want to override
  // target CPU utilization to 80, you should set
  // [autoscaling_metric_specs.metric_name][google.cloud.visionai.v1.AutoscalingMetricSpec.metric_name]
  // to `aiplatform.googleapis.com/prediction/online/cpu/utilization` and
  // [autoscaling_metric_specs.target][google.cloud.visionai.v1.AutoscalingMetricSpec.target]
  // to `80`.
  repeated AutoscalingMetricSpec autoscaling_metric_specs = 4
      [(google.api.field_behavior) = IMMUTABLE];
}
// Message describing ProductRecognizerConfig.
message ProductRecognizerConfig {
  // The resource name of retail endpoint to use.
  string retail_endpoint = 1;

  // Confidence threshold to filter detection results. If not set, a system
  // default value will be used.
  float recognition_confidence_threshold = 2;
}
// Message describing TagRecognizerConfig.
message TagRecognizerConfig {
  // Confidence threshold to filter detection results. If not set, a system
  // default value will be used.
  float entity_detection_confidence_threshold = 1;

  // Configuration to customize how tags are parsed.
  TagParsingConfig tag_parsing_config = 2;
}
// Configuration for tag parsing.
message TagParsingConfig {
  // Configuration for parsing a tag entity class.
  message EntityParsingConfig {
    // Type of entity matching strategy.
    enum EntityMatchingStrategy {
      // If unspecified, multi-line matching will be used by default.
      ENTITY_MATCHING_STRATEGY_UNSPECIFIED = 0;

      // Matches multiple lines of text.
      MULTI_LINE_MATCHING = 1;

      // Matches the line with the maximum overlap area with entity bounding
      // box.
      MAX_OVERLAP_AREA = 2;
    }

    // Required. The tag entity class name. This should match the class name
    // produced by the tag entity detection model.
    string entity_class = 1 [(google.api.field_behavior) = REQUIRED];

    // Optional. A regular expression hint.
    string regex = 2 [(google.api.field_behavior) = OPTIONAL];

    // Optional. Entity matching strategy.
    EntityMatchingStrategy entity_matching_strategy = 3
        [(google.api.field_behavior) = OPTIONAL];
  }

  // Each tag entity class may have an optional EntityParsingConfig which is
  // used to help parse the entities of the class.
  repeated EntityParsingConfig entity_parsing_configs = 1;
}
// © 2015 - 2025 Weber Informatics LLC | Privacy Policy