All Downloads are FREE. Search and download functionalities are using the official Maven repository.

com.google.cloud.visionai.v1.VertexCustomConfigOrBuilder Maven / Gradle / Ivy

/*
 * Copyright 2024 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: google/cloud/visionai/v1/platform.proto

// Protobuf Java Version: 3.25.3
package com.google.cloud.visionai.v1;

public interface VertexCustomConfigOrBuilder
    extends
    // @@protoc_insertion_point(interface_extends:google.cloud.visionai.v1.VertexCustomConfig)
    com.google.protobuf.MessageOrBuilder {

  /**
   *
   *
   * 
   * The max prediction frame per second. This attribute sets how fast the
   * operator sends prediction requests to Vertex AI endpoint. Default value is
   * 0, which means there is no max prediction fps limit. The operator sends
   * prediction requests at input fps.
   * 
* * int32 max_prediction_fps = 1; * * @return The maxPredictionFps. */ int getMaxPredictionFps(); /** * * *
   * A description of resources that are dedicated to the DeployedModel, and
   * that need a higher degree of manual configuration.
   * 
* * .google.cloud.visionai.v1.DedicatedResources dedicated_resources = 2; * * @return Whether the dedicatedResources field is set. */ boolean hasDedicatedResources(); /** * * *
   * A description of resources that are dedicated to the DeployedModel, and
   * that need a higher degree of manual configuration.
   * 
* * .google.cloud.visionai.v1.DedicatedResources dedicated_resources = 2; * * @return The dedicatedResources. */ com.google.cloud.visionai.v1.DedicatedResources getDedicatedResources(); /** * * *
   * A description of resources that are dedicated to the DeployedModel, and
   * that need a higher degree of manual configuration.
   * 
* * .google.cloud.visionai.v1.DedicatedResources dedicated_resources = 2; */ com.google.cloud.visionai.v1.DedicatedResourcesOrBuilder getDedicatedResourcesOrBuilder(); /** * * *
   * If not empty, the prediction result will be sent to the specified cloud
   * function for post processing.
   * * The cloud function will receive AppPlatformCloudFunctionRequest where
   * the annotations field will be the json format of proto PredictResponse.
   * * The cloud function should return AppPlatformCloudFunctionResponse with
   * PredictResponse stored in the annotations field.
   * * To drop the prediction output, simply clear the payload field in the
   * returned AppPlatformCloudFunctionResponse.
   * 
* * string post_processing_cloud_function = 3; * * @return The postProcessingCloudFunction. */ java.lang.String getPostProcessingCloudFunction(); /** * * *
   * If not empty, the prediction result will be sent to the specified cloud
   * function for post processing.
   * * The cloud function will receive AppPlatformCloudFunctionRequest where
   * the annotations field will be the json format of proto PredictResponse.
   * * The cloud function should return AppPlatformCloudFunctionResponse with
   * PredictResponse stored in the annotations field.
   * * To drop the prediction output, simply clear the payload field in the
   * returned AppPlatformCloudFunctionResponse.
   * 
* * string post_processing_cloud_function = 3; * * @return The bytes for postProcessingCloudFunction. */ com.google.protobuf.ByteString getPostProcessingCloudFunctionBytes(); /** * * *
   * If true, the prediction request received by custom model will also contain
   * metadata with the following schema:
   * 'appPlatformMetadata': {
   *       'ingestionTime': DOUBLE; (UNIX timestamp)
   *       'application': STRING;
   *       'instanceId': STRING;
   *       'node': STRING;
   *       'processor': STRING;
   *  }
   * 
* * bool attach_application_metadata = 4; * * @return The attachApplicationMetadata. */ boolean getAttachApplicationMetadata(); /** * * *
   * Optional. By setting the configuration_input_topic, processor will
   * subscribe to given topic, only pub/sub topic is supported now. Example
   * channel:
   * //pubsub.googleapis.com/projects/visionai-testing-stable/topics/test-topic
   * message schema should be:
   * message Message {
   * // The ID of the stream that associates with the application instance.
   * string stream_id = 1;
   * // The target fps. By default, the custom processor will *not* send any
   * data to the Vertex Prediction container. Note that once the
   * dynamic_config_input_topic is set, max_prediction_fps will not work and be
   * preceded by the fps set inside the topic.
   * int32 fps = 2;
   * }
   * 
* * optional string dynamic_config_input_topic = 6 [(.google.api.field_behavior) = OPTIONAL]; * * * @return Whether the dynamicConfigInputTopic field is set. */ boolean hasDynamicConfigInputTopic(); /** * * *
   * Optional. By setting the configuration_input_topic, processor will
   * subscribe to given topic, only pub/sub topic is supported now. Example
   * channel:
   * //pubsub.googleapis.com/projects/visionai-testing-stable/topics/test-topic
   * message schema should be:
   * message Message {
   * // The ID of the stream that associates with the application instance.
   * string stream_id = 1;
   * // The target fps. By default, the custom processor will *not* send any
   * data to the Vertex Prediction container. Note that once the
   * dynamic_config_input_topic is set, max_prediction_fps will not work and be
   * preceded by the fps set inside the topic.
   * int32 fps = 2;
   * }
   * 
* * optional string dynamic_config_input_topic = 6 [(.google.api.field_behavior) = OPTIONAL]; * * * @return The dynamicConfigInputTopic. */ java.lang.String getDynamicConfigInputTopic(); /** * * *
   * Optional. By setting the configuration_input_topic, processor will
   * subscribe to given topic, only pub/sub topic is supported now. Example
   * channel:
   * //pubsub.googleapis.com/projects/visionai-testing-stable/topics/test-topic
   * message schema should be:
   * message Message {
   * // The ID of the stream that associates with the application instance.
   * string stream_id = 1;
   * // The target fps. By default, the custom processor will *not* send any
   * data to the Vertex Prediction container. Note that once the
   * dynamic_config_input_topic is set, max_prediction_fps will not work and be
   * preceded by the fps set inside the topic.
   * int32 fps = 2;
   * }
   * 
* * optional string dynamic_config_input_topic = 6 [(.google.api.field_behavior) = OPTIONAL]; * * * @return The bytes for dynamicConfigInputTopic. */ com.google.protobuf.ByteString getDynamicConfigInputTopicBytes(); }




© 2015 - 2025 Weber Informatics LLC | Privacy Policy