
com.google.cloud.visionai.v1.VertexCustomConfig

/*
 * Copyright 2024 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: google/cloud/visionai/v1/platform.proto

// Protobuf Java Version: 3.25.3
package com.google.cloud.visionai.v1;

/**
 *
 *
 * 
 * Message describing VertexCustomConfig.
 * 
* * Protobuf type {@code google.cloud.visionai.v1.VertexCustomConfig} */ public final class VertexCustomConfig extends com.google.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:google.cloud.visionai.v1.VertexCustomConfig) VertexCustomConfigOrBuilder { private static final long serialVersionUID = 0L; // Use VertexCustomConfig.newBuilder() to construct. private VertexCustomConfig(com.google.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private VertexCustomConfig() { postProcessingCloudFunction_ = ""; dynamicConfigInputTopic_ = ""; } @java.lang.Override @SuppressWarnings({"unused"}) protected java.lang.Object newInstance(UnusedPrivateParameter unused) { return new VertexCustomConfig(); } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return com.google.cloud.visionai.v1.PlatformProto .internal_static_google_cloud_visionai_v1_VertexCustomConfig_descriptor; } @java.lang.Override protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return com.google.cloud.visionai.v1.PlatformProto .internal_static_google_cloud_visionai_v1_VertexCustomConfig_fieldAccessorTable .ensureFieldAccessorsInitialized( com.google.cloud.visionai.v1.VertexCustomConfig.class, com.google.cloud.visionai.v1.VertexCustomConfig.Builder.class); } private int bitField0_; public static final int MAX_PREDICTION_FPS_FIELD_NUMBER = 1; private int maxPredictionFps_ = 0; /** * * *
   * The maximum number of prediction frames per second. This attribute sets how
   * fast the operator sends prediction requests to the Vertex AI endpoint. The
   * default value is 0, which means there is no max prediction fps limit; the
   * operator then sends prediction requests at the input fps.
   * 
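   * For illustration, a minimal builder sketch that caps prediction requests at
   * 5 fps (the value is a placeholder):
   * <pre>{@code
   * VertexCustomConfig config =
   *     VertexCustomConfig.newBuilder().setMaxPredictionFps(5).build();
   * }</pre>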
* * int32 max_prediction_fps = 1; * * @return The maxPredictionFps. */ @java.lang.Override public int getMaxPredictionFps() { return maxPredictionFps_; } public static final int DEDICATED_RESOURCES_FIELD_NUMBER = 2; private com.google.cloud.visionai.v1.DedicatedResources dedicatedResources_; /** * * *
   * A description of resources that are dedicated to the DeployedModel, and
   * that need a higher degree of manual configuration.
   * 
* * .google.cloud.visionai.v1.DedicatedResources dedicated_resources = 2; * * @return Whether the dedicatedResources field is set. */ @java.lang.Override public boolean hasDedicatedResources() { return ((bitField0_ & 0x00000001) != 0); } /** * * *
   * A description of resources that are dedicated to the DeployedModel, and
   * that need a higher degree of manual configuration.
   * 
* * .google.cloud.visionai.v1.DedicatedResources dedicated_resources = 2; * * @return The dedicatedResources. */ @java.lang.Override public com.google.cloud.visionai.v1.DedicatedResources getDedicatedResources() { return dedicatedResources_ == null ? com.google.cloud.visionai.v1.DedicatedResources.getDefaultInstance() : dedicatedResources_; } /** * * *
   * A description of resources that are dedicated to the DeployedModel, and
   * that need a higher degree of manual configuration.
   * 
* * .google.cloud.visionai.v1.DedicatedResources dedicated_resources = 2; */ @java.lang.Override public com.google.cloud.visionai.v1.DedicatedResourcesOrBuilder getDedicatedResourcesOrBuilder() { return dedicatedResources_ == null ? com.google.cloud.visionai.v1.DedicatedResources.getDefaultInstance() : dedicatedResources_; } public static final int POST_PROCESSING_CLOUD_FUNCTION_FIELD_NUMBER = 3; @SuppressWarnings("serial") private volatile java.lang.Object postProcessingCloudFunction_ = ""; /** * * *
   * If not empty, the prediction result will be sent to the specified cloud
   * function for post-processing.
   * * The cloud function will receive AppPlatformCloudFunctionRequest, where
   * the annotations field will be the JSON representation of the
   * PredictResponse proto.
   * * The cloud function should return AppPlatformCloudFunctionResponse with
   * PredictResponse stored in the annotations field.
   * * To drop the prediction output, simply clear the payload field in the
   * returned AppPlatformCloudFunctionResponse.
   * 
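   * For illustration, a sketch wiring in a post-processing function; the
   * function resource name below is a placeholder, not a value from this file:
   * <pre>{@code
   * VertexCustomConfig config =
   *     VertexCustomConfig.newBuilder()
   *         .setPostProcessingCloudFunction(
   *             "projects/my-project/locations/us-central1/functions/post-process")
   *         .build();
   * }</pre>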
* * string post_processing_cloud_function = 3; * * @return The postProcessingCloudFunction. */ @java.lang.Override public java.lang.String getPostProcessingCloudFunction() { java.lang.Object ref = postProcessingCloudFunction_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); postProcessingCloudFunction_ = s; return s; } } /** * * *
   * If not empty, the prediction result will be sent to the specified cloud
   * function for post-processing.
   * * The cloud function will receive AppPlatformCloudFunctionRequest, where
   * the annotations field will be the JSON representation of the
   * PredictResponse proto.
   * * The cloud function should return AppPlatformCloudFunctionResponse with
   * PredictResponse stored in the annotations field.
   * * To drop the prediction output, simply clear the payload field in the
   * returned AppPlatformCloudFunctionResponse.
   * 
* * string post_processing_cloud_function = 3; * * @return The bytes for postProcessingCloudFunction. */ @java.lang.Override public com.google.protobuf.ByteString getPostProcessingCloudFunctionBytes() { java.lang.Object ref = postProcessingCloudFunction_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); postProcessingCloudFunction_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } public static final int ATTACH_APPLICATION_METADATA_FIELD_NUMBER = 4; private boolean attachApplicationMetadata_ = false; /** * * *
   * If true, the prediction request received by the custom model will also contain
   * metadata with the following schema:
   * 'appPlatformMetadata': {
   *       'ingestionTime': DOUBLE; (UNIX timestamp)
   *       'application': STRING;
   *       'instanceId': STRING;
   *       'node': STRING;
   *       'processor': STRING;
   *  }
   * 
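   * For illustration, enabling the flag so each prediction request carries the
   * appPlatformMetadata block described above:
   * <pre>{@code
   * VertexCustomConfig config =
   *     VertexCustomConfig.newBuilder().setAttachApplicationMetadata(true).build();
   * }</pre>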
* * bool attach_application_metadata = 4; * * @return The attachApplicationMetadata. */ @java.lang.Override public boolean getAttachApplicationMetadata() { return attachApplicationMetadata_; } public static final int DYNAMIC_CONFIG_INPUT_TOPIC_FIELD_NUMBER = 6; @SuppressWarnings("serial") private volatile java.lang.Object dynamicConfigInputTopic_ = ""; /** * * *
   * Optional. If the configuration_input_topic is set, the processor subscribes
   * to the given topic; only Pub/Sub topics are supported for now. Example
   * channel:
   * //pubsub.googleapis.com/projects/visionai-testing-stable/topics/test-topic
   * The message schema should be:
   * message Message {
   *   // The ID of the stream associated with the application instance.
   *   string stream_id = 1;
   *   // The target fps. By default, the custom processor will *not* send any
   *   // data to the Vertex Prediction container. Note that once
   *   // dynamic_config_input_topic is set, max_prediction_fps no longer applies
   *   // and is superseded by the fps set inside the topic.
   *   int32 fps = 2;
   * }
   * 
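   * For illustration, a sketch pointing the processor at the example Pub/Sub
   * channel above; once set, the per-stream fps from the topic takes precedence
   * over max_prediction_fps:
   * <pre>{@code
   * VertexCustomConfig config =
   *     VertexCustomConfig.newBuilder()
   *         .setDynamicConfigInputTopic(
   *             "//pubsub.googleapis.com/projects/visionai-testing-stable/topics/test-topic")
   *         .build();
   * }</pre>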
* * optional string dynamic_config_input_topic = 6 [(.google.api.field_behavior) = OPTIONAL]; * * * @return Whether the dynamicConfigInputTopic field is set. */ @java.lang.Override public boolean hasDynamicConfigInputTopic() { return ((bitField0_ & 0x00000002) != 0); } /** * * *
   * Optional. If the configuration_input_topic is set, the processor subscribes
   * to the given topic; only Pub/Sub topics are supported for now. Example
   * channel:
   * //pubsub.googleapis.com/projects/visionai-testing-stable/topics/test-topic
   * The message schema should be:
   * message Message {
   *   // The ID of the stream associated with the application instance.
   *   string stream_id = 1;
   *   // The target fps. By default, the custom processor will *not* send any
   *   // data to the Vertex Prediction container. Note that once
   *   // dynamic_config_input_topic is set, max_prediction_fps no longer applies
   *   // and is superseded by the fps set inside the topic.
   *   int32 fps = 2;
   * }
   * 
* * optional string dynamic_config_input_topic = 6 [(.google.api.field_behavior) = OPTIONAL]; * * * @return The dynamicConfigInputTopic. */ @java.lang.Override public java.lang.String getDynamicConfigInputTopic() { java.lang.Object ref = dynamicConfigInputTopic_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); dynamicConfigInputTopic_ = s; return s; } } /** * * *
   * Optional. If the configuration_input_topic is set, the processor subscribes
   * to the given topic; only Pub/Sub topics are supported for now. Example
   * channel:
   * //pubsub.googleapis.com/projects/visionai-testing-stable/topics/test-topic
   * The message schema should be:
   * message Message {
   *   // The ID of the stream associated with the application instance.
   *   string stream_id = 1;
   *   // The target fps. By default, the custom processor will *not* send any
   *   // data to the Vertex Prediction container. Note that once
   *   // dynamic_config_input_topic is set, max_prediction_fps no longer applies
   *   // and is superseded by the fps set inside the topic.
   *   int32 fps = 2;
   * }
   * 
* * optional string dynamic_config_input_topic = 6 [(.google.api.field_behavior) = OPTIONAL]; * * * @return The bytes for dynamicConfigInputTopic. */ @java.lang.Override public com.google.protobuf.ByteString getDynamicConfigInputTopicBytes() { java.lang.Object ref = dynamicConfigInputTopic_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); dynamicConfigInputTopic_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { if (maxPredictionFps_ != 0) { output.writeInt32(1, maxPredictionFps_); } if (((bitField0_ & 0x00000001) != 0)) { output.writeMessage(2, getDedicatedResources()); } if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(postProcessingCloudFunction_)) { com.google.protobuf.GeneratedMessageV3.writeString(output, 3, postProcessingCloudFunction_); } if (attachApplicationMetadata_ != false) { output.writeBool(4, attachApplicationMetadata_); } if (((bitField0_ & 0x00000002) != 0)) { com.google.protobuf.GeneratedMessageV3.writeString(output, 6, dynamicConfigInputTopic_); } getUnknownFields().writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (maxPredictionFps_ != 0) { size += com.google.protobuf.CodedOutputStream.computeInt32Size(1, maxPredictionFps_); } if (((bitField0_ & 0x00000001) != 0)) { size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getDedicatedResources()); } if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(postProcessingCloudFunction_)) { size += com.google.protobuf.GeneratedMessageV3.computeStringSize(3, postProcessingCloudFunction_); } if (attachApplicationMetadata_ != false) { size += com.google.protobuf.CodedOutputStream.computeBoolSize(4, attachApplicationMetadata_); } if (((bitField0_ & 0x00000002) != 0)) { size += com.google.protobuf.GeneratedMessageV3.computeStringSize(6, dynamicConfigInputTopic_); } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof com.google.cloud.visionai.v1.VertexCustomConfig)) { return super.equals(obj); } com.google.cloud.visionai.v1.VertexCustomConfig other = (com.google.cloud.visionai.v1.VertexCustomConfig) obj; if (getMaxPredictionFps() != other.getMaxPredictionFps()) return false; if (hasDedicatedResources() != other.hasDedicatedResources()) return false; if (hasDedicatedResources()) { if (!getDedicatedResources().equals(other.getDedicatedResources())) return false; } if (!getPostProcessingCloudFunction().equals(other.getPostProcessingCloudFunction())) return false; if (getAttachApplicationMetadata() != other.getAttachApplicationMetadata()) return false; if (hasDynamicConfigInputTopic() != other.hasDynamicConfigInputTopic()) return false; if (hasDynamicConfigInputTopic()) { if (!getDynamicConfigInputTopic().equals(other.getDynamicConfigInputTopic())) return false; } if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @java.lang.Override public int 
hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); hash = (37 * hash) + MAX_PREDICTION_FPS_FIELD_NUMBER; hash = (53 * hash) + getMaxPredictionFps(); if (hasDedicatedResources()) { hash = (37 * hash) + DEDICATED_RESOURCES_FIELD_NUMBER; hash = (53 * hash) + getDedicatedResources().hashCode(); } hash = (37 * hash) + POST_PROCESSING_CLOUD_FUNCTION_FIELD_NUMBER; hash = (53 * hash) + getPostProcessingCloudFunction().hashCode(); hash = (37 * hash) + ATTACH_APPLICATION_METADATA_FIELD_NUMBER; hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getAttachApplicationMetadata()); if (hasDynamicConfigInputTopic()) { hash = (37 * hash) + DYNAMIC_CONFIG_INPUT_TOPIC_FIELD_NUMBER; hash = (53 * hash) + getDynamicConfigInputTopic().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static com.google.cloud.visionai.v1.VertexCustomConfig parseFrom(java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static com.google.cloud.visionai.v1.VertexCustomConfig parseFrom( java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static com.google.cloud.visionai.v1.VertexCustomConfig parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static com.google.cloud.visionai.v1.VertexCustomConfig parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static com.google.cloud.visionai.v1.VertexCustomConfig parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static com.google.cloud.visionai.v1.VertexCustomConfig parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static com.google.cloud.visionai.v1.VertexCustomConfig parseFrom(java.io.InputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); } public static com.google.cloud.visionai.v1.VertexCustomConfig parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseWithIOException( PARSER, input, extensionRegistry); } public static com.google.cloud.visionai.v1.VertexCustomConfig parseDelimitedFrom( java.io.InputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); } public static com.google.cloud.visionai.v1.VertexCustomConfig parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( PARSER, input, extensionRegistry); } public static com.google.cloud.visionai.v1.VertexCustomConfig parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return 
com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); } public static com.google.cloud.visionai.v1.VertexCustomConfig parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseWithIOException( PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(com.google.cloud.visionai.v1.VertexCustomConfig prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * * *
   * Message describing VertexCustomConfig.
   * 
* * Protobuf type {@code google.cloud.visionai.v1.VertexCustomConfig} */ public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:google.cloud.visionai.v1.VertexCustomConfig) com.google.cloud.visionai.v1.VertexCustomConfigOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return com.google.cloud.visionai.v1.PlatformProto .internal_static_google_cloud_visionai_v1_VertexCustomConfig_descriptor; } @java.lang.Override protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return com.google.cloud.visionai.v1.PlatformProto .internal_static_google_cloud_visionai_v1_VertexCustomConfig_fieldAccessorTable .ensureFieldAccessorsInitialized( com.google.cloud.visionai.v1.VertexCustomConfig.class, com.google.cloud.visionai.v1.VertexCustomConfig.Builder.class); } // Construct using com.google.cloud.visionai.v1.VertexCustomConfig.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { getDedicatedResourcesFieldBuilder(); } } @java.lang.Override public Builder clear() { super.clear(); bitField0_ = 0; maxPredictionFps_ = 0; dedicatedResources_ = null; if (dedicatedResourcesBuilder_ != null) { dedicatedResourcesBuilder_.dispose(); dedicatedResourcesBuilder_ = null; } postProcessingCloudFunction_ = ""; attachApplicationMetadata_ = false; dynamicConfigInputTopic_ = ""; return this; } @java.lang.Override public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return com.google.cloud.visionai.v1.PlatformProto .internal_static_google_cloud_visionai_v1_VertexCustomConfig_descriptor; } @java.lang.Override public com.google.cloud.visionai.v1.VertexCustomConfig getDefaultInstanceForType() { return com.google.cloud.visionai.v1.VertexCustomConfig.getDefaultInstance(); } @java.lang.Override public com.google.cloud.visionai.v1.VertexCustomConfig build() { com.google.cloud.visionai.v1.VertexCustomConfig result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public com.google.cloud.visionai.v1.VertexCustomConfig buildPartial() { com.google.cloud.visionai.v1.VertexCustomConfig result = new com.google.cloud.visionai.v1.VertexCustomConfig(this); if (bitField0_ != 0) { buildPartial0(result); } onBuilt(); return result; } private void buildPartial0(com.google.cloud.visionai.v1.VertexCustomConfig result) { int from_bitField0_ = bitField0_; if (((from_bitField0_ & 0x00000001) != 0)) { result.maxPredictionFps_ = maxPredictionFps_; } int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000002) != 0)) { result.dedicatedResources_ = dedicatedResourcesBuilder_ == null ? 
dedicatedResources_ : dedicatedResourcesBuilder_.build(); to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000004) != 0)) { result.postProcessingCloudFunction_ = postProcessingCloudFunction_; } if (((from_bitField0_ & 0x00000008) != 0)) { result.attachApplicationMetadata_ = attachApplicationMetadata_; } if (((from_bitField0_ & 0x00000010) != 0)) { result.dynamicConfigInputTopic_ = dynamicConfigInputTopic_; to_bitField0_ |= 0x00000002; } result.bitField0_ |= to_bitField0_; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof com.google.cloud.visionai.v1.VertexCustomConfig) { return mergeFrom((com.google.cloud.visionai.v1.VertexCustomConfig) other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(com.google.cloud.visionai.v1.VertexCustomConfig other) { if (other == com.google.cloud.visionai.v1.VertexCustomConfig.getDefaultInstance()) return this; if (other.getMaxPredictionFps() != 0) { setMaxPredictionFps(other.getMaxPredictionFps()); } if (other.hasDedicatedResources()) { mergeDedicatedResources(other.getDedicatedResources()); } if (!other.getPostProcessingCloudFunction().isEmpty()) { postProcessingCloudFunction_ = other.postProcessingCloudFunction_; bitField0_ |= 0x00000004; onChanged(); } if (other.getAttachApplicationMetadata() != false) { setAttachApplicationMetadata(other.getAttachApplicationMetadata()); } if (other.hasDynamicConfigInputTopic()) { dynamicConfigInputTopic_ = other.dynamicConfigInputTopic_; bitField0_ |= 0x00000010; onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { return true; } @java.lang.Override public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 8: { maxPredictionFps_ = input.readInt32(); bitField0_ |= 0x00000001; break; } // case 8 case 18: { input.readMessage( getDedicatedResourcesFieldBuilder().getBuilder(), extensionRegistry); bitField0_ |= 0x00000002; break; } // case 18 case 26: { postProcessingCloudFunction_ = input.readStringRequireUtf8(); bitField0_ |= 0x00000004; break; } // case 26 case 32: { attachApplicationMetadata_ = input.readBool(); bitField0_ |= 0x00000008; break; } // case 32 case 50: { dynamicConfigInputTopic_ = input.readStringRequireUtf8(); bitField0_ |= 0x00000010; break; } // case 50 
default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { done = true; // was an endgroup tag } break; } // default: } // switch (tag) } // while (!done) } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.unwrapIOException(); } finally { onChanged(); } // finally return this; } private int bitField0_; private int maxPredictionFps_; /** * * *
     * The maximum number of prediction frames per second. This attribute sets how
     * fast the operator sends prediction requests to the Vertex AI endpoint. The
     * default value is 0, which means there is no max prediction fps limit; the
     * operator then sends prediction requests at the input fps.
     * 
* * int32 max_prediction_fps = 1; * * @return The maxPredictionFps. */ @java.lang.Override public int getMaxPredictionFps() { return maxPredictionFps_; } /** * * *
     * The maximum number of prediction frames per second. This attribute sets how
     * fast the operator sends prediction requests to the Vertex AI endpoint. The
     * default value is 0, which means there is no max prediction fps limit; the
     * operator then sends prediction requests at the input fps.
     * 
* * int32 max_prediction_fps = 1; * * @param value The maxPredictionFps to set. * @return This builder for chaining. */ public Builder setMaxPredictionFps(int value) { maxPredictionFps_ = value; bitField0_ |= 0x00000001; onChanged(); return this; } /** * * *
     * The maximum number of prediction frames per second. This attribute sets how
     * fast the operator sends prediction requests to the Vertex AI endpoint. The
     * default value is 0, which means there is no max prediction fps limit; the
     * operator then sends prediction requests at the input fps.
     * 
* * int32 max_prediction_fps = 1; * * @return This builder for chaining. */ public Builder clearMaxPredictionFps() { bitField0_ = (bitField0_ & ~0x00000001); maxPredictionFps_ = 0; onChanged(); return this; } private com.google.cloud.visionai.v1.DedicatedResources dedicatedResources_; private com.google.protobuf.SingleFieldBuilderV3< com.google.cloud.visionai.v1.DedicatedResources, com.google.cloud.visionai.v1.DedicatedResources.Builder, com.google.cloud.visionai.v1.DedicatedResourcesOrBuilder> dedicatedResourcesBuilder_; /** * * *
     * A description of resources that are dedicated to the DeployedModel, and
     * that need a higher degree of manual configuration.
     * 
* * .google.cloud.visionai.v1.DedicatedResources dedicated_resources = 2; * * @return Whether the dedicatedResources field is set. */ public boolean hasDedicatedResources() { return ((bitField0_ & 0x00000002) != 0); } /** * * *
     * A description of resources that are dedicated to the DeployedModel, and
     * that need a higher degree of manual configuration.
     * 
* * .google.cloud.visionai.v1.DedicatedResources dedicated_resources = 2; * * @return The dedicatedResources. */ public com.google.cloud.visionai.v1.DedicatedResources getDedicatedResources() { if (dedicatedResourcesBuilder_ == null) { return dedicatedResources_ == null ? com.google.cloud.visionai.v1.DedicatedResources.getDefaultInstance() : dedicatedResources_; } else { return dedicatedResourcesBuilder_.getMessage(); } } /** * * *
     * A description of resources that are dedicated to the DeployedModel, and
     * that need a higher degree of manual configuration.
     * 
* * .google.cloud.visionai.v1.DedicatedResources dedicated_resources = 2; */ public Builder setDedicatedResources(com.google.cloud.visionai.v1.DedicatedResources value) { if (dedicatedResourcesBuilder_ == null) { if (value == null) { throw new NullPointerException(); } dedicatedResources_ = value; } else { dedicatedResourcesBuilder_.setMessage(value); } bitField0_ |= 0x00000002; onChanged(); return this; } /** * * *
     * A description of resources that are dedicated to the DeployedModel, and
     * that need a higher degree of manual configuration.
     * 
* * .google.cloud.visionai.v1.DedicatedResources dedicated_resources = 2; */ public Builder setDedicatedResources( com.google.cloud.visionai.v1.DedicatedResources.Builder builderForValue) { if (dedicatedResourcesBuilder_ == null) { dedicatedResources_ = builderForValue.build(); } else { dedicatedResourcesBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000002; onChanged(); return this; } /** * * *
     * A description of resources that are dedicated to the DeployedModel, and
     * that need a higher degree of manual configuration.
     * 
* * .google.cloud.visionai.v1.DedicatedResources dedicated_resources = 2; */ public Builder mergeDedicatedResources(com.google.cloud.visionai.v1.DedicatedResources value) { if (dedicatedResourcesBuilder_ == null) { if (((bitField0_ & 0x00000002) != 0) && dedicatedResources_ != null && dedicatedResources_ != com.google.cloud.visionai.v1.DedicatedResources.getDefaultInstance()) { getDedicatedResourcesBuilder().mergeFrom(value); } else { dedicatedResources_ = value; } } else { dedicatedResourcesBuilder_.mergeFrom(value); } if (dedicatedResources_ != null) { bitField0_ |= 0x00000002; onChanged(); } return this; } /** * * *
     * A description of resources that are dedicated to the DeployedModel, and
     * that need a higher degree of manual configuration.
     * 
* * .google.cloud.visionai.v1.DedicatedResources dedicated_resources = 2; */ public Builder clearDedicatedResources() { bitField0_ = (bitField0_ & ~0x00000002); dedicatedResources_ = null; if (dedicatedResourcesBuilder_ != null) { dedicatedResourcesBuilder_.dispose(); dedicatedResourcesBuilder_ = null; } onChanged(); return this; } /** * * *
     * A description of resources that are dedicated to the DeployedModel, and
     * that need a higher degree of manual configuration.
     * 
* * .google.cloud.visionai.v1.DedicatedResources dedicated_resources = 2; */ public com.google.cloud.visionai.v1.DedicatedResources.Builder getDedicatedResourcesBuilder() { bitField0_ |= 0x00000002; onChanged(); return getDedicatedResourcesFieldBuilder().getBuilder(); } /** * * *
     * A description of resources that are dedicated to the DeployedModel, and
     * that need a higher degree of manual configuration.
     * 
* * .google.cloud.visionai.v1.DedicatedResources dedicated_resources = 2; */ public com.google.cloud.visionai.v1.DedicatedResourcesOrBuilder getDedicatedResourcesOrBuilder() { if (dedicatedResourcesBuilder_ != null) { return dedicatedResourcesBuilder_.getMessageOrBuilder(); } else { return dedicatedResources_ == null ? com.google.cloud.visionai.v1.DedicatedResources.getDefaultInstance() : dedicatedResources_; } } /** * * *
     * A description of resources that are dedicated to the DeployedModel, and
     * that need a higher degree of manual configuration.
     * 
* * .google.cloud.visionai.v1.DedicatedResources dedicated_resources = 2; */ private com.google.protobuf.SingleFieldBuilderV3< com.google.cloud.visionai.v1.DedicatedResources, com.google.cloud.visionai.v1.DedicatedResources.Builder, com.google.cloud.visionai.v1.DedicatedResourcesOrBuilder> getDedicatedResourcesFieldBuilder() { if (dedicatedResourcesBuilder_ == null) { dedicatedResourcesBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< com.google.cloud.visionai.v1.DedicatedResources, com.google.cloud.visionai.v1.DedicatedResources.Builder, com.google.cloud.visionai.v1.DedicatedResourcesOrBuilder>( getDedicatedResources(), getParentForChildren(), isClean()); dedicatedResources_ = null; } return dedicatedResourcesBuilder_; } private java.lang.Object postProcessingCloudFunction_ = ""; /** * * *
     * If not empty, the prediction result will be sent to the specified cloud
     * function for post-processing.
     * * The cloud function will receive AppPlatformCloudFunctionRequest, where
     * the annotations field will be the JSON representation of the
     * PredictResponse proto.
     * * The cloud function should return AppPlatformCloudFunctionResponse with
     * PredictResponse stored in the annotations field.
     * * To drop the prediction output, simply clear the payload field in the
     * returned AppPlatformCloudFunctionResponse.
     * 
* * string post_processing_cloud_function = 3; * * @return The postProcessingCloudFunction. */ public java.lang.String getPostProcessingCloudFunction() { java.lang.Object ref = postProcessingCloudFunction_; if (!(ref instanceof java.lang.String)) { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); postProcessingCloudFunction_ = s; return s; } else { return (java.lang.String) ref; } } /** * * *
     * If not empty, the prediction result will be sent to the specified cloud
     * function for post-processing.
     * * The cloud function will receive AppPlatformCloudFunctionRequest, where
     * the annotations field will be the JSON representation of the
     * PredictResponse proto.
     * * The cloud function should return AppPlatformCloudFunctionResponse with
     * PredictResponse stored in the annotations field.
     * * To drop the prediction output, simply clear the payload field in the
     * returned AppPlatformCloudFunctionResponse.
     * 
* * string post_processing_cloud_function = 3; * * @return The bytes for postProcessingCloudFunction. */ public com.google.protobuf.ByteString getPostProcessingCloudFunctionBytes() { java.lang.Object ref = postProcessingCloudFunction_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); postProcessingCloudFunction_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } /** * * *
     * If not empty, the prediction result will be sent to the specified cloud
     * function for post-processing.
     * * The cloud function will receive AppPlatformCloudFunctionRequest, where
     * the annotations field will be the JSON representation of the
     * PredictResponse proto.
     * * The cloud function should return AppPlatformCloudFunctionResponse with
     * PredictResponse stored in the annotations field.
     * * To drop the prediction output, simply clear the payload field in the
     * returned AppPlatformCloudFunctionResponse.
     * 
* * string post_processing_cloud_function = 3; * * @param value The postProcessingCloudFunction to set. * @return This builder for chaining. */ public Builder setPostProcessingCloudFunction(java.lang.String value) { if (value == null) { throw new NullPointerException(); } postProcessingCloudFunction_ = value; bitField0_ |= 0x00000004; onChanged(); return this; } /** * * *
     * If not empty, the prediction result will be sent to the specified cloud
     * function for post-processing.
     * * The cloud function will receive AppPlatformCloudFunctionRequest, where
     * the annotations field will be the JSON representation of the
     * PredictResponse proto.
     * * The cloud function should return AppPlatformCloudFunctionResponse with
     * PredictResponse stored in the annotations field.
     * * To drop the prediction output, simply clear the payload field in the
     * returned AppPlatformCloudFunctionResponse.
     * 
* * string post_processing_cloud_function = 3; * * @return This builder for chaining. */ public Builder clearPostProcessingCloudFunction() { postProcessingCloudFunction_ = getDefaultInstance().getPostProcessingCloudFunction(); bitField0_ = (bitField0_ & ~0x00000004); onChanged(); return this; } /** * * *
     * If not empty, the prediction result will be sent to the specified cloud
     * function for post-processing.
     * * The cloud function will receive AppPlatformCloudFunctionRequest, where
     * the annotations field will be the JSON representation of the
     * PredictResponse proto.
     * * The cloud function should return AppPlatformCloudFunctionResponse with
     * PredictResponse stored in the annotations field.
     * * To drop the prediction output, simply clear the payload field in the
     * returned AppPlatformCloudFunctionResponse.
     * 
* * string post_processing_cloud_function = 3; * * @param value The bytes for postProcessingCloudFunction to set. * @return This builder for chaining. */ public Builder setPostProcessingCloudFunctionBytes(com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } checkByteStringIsUtf8(value); postProcessingCloudFunction_ = value; bitField0_ |= 0x00000004; onChanged(); return this; } private boolean attachApplicationMetadata_; /** * * *
     * If true, the prediction request received by the custom model will also contain
     * metadata with the following schema:
     * 'appPlatformMetadata': {
     *       'ingestionTime': DOUBLE; (UNIX timestamp)
     *       'application': STRING;
     *       'instanceId': STRING;
     *       'node': STRING;
     *       'processor': STRING;
     *  }
     * 
* * bool attach_application_metadata = 4; * * @return The attachApplicationMetadata. */ @java.lang.Override public boolean getAttachApplicationMetadata() { return attachApplicationMetadata_; } /** * * *
     * If true, the prediction request received by the custom model will also contain
     * metadata with the following schema:
     * 'appPlatformMetadata': {
     *       'ingestionTime': DOUBLE; (UNIX timestamp)
     *       'application': STRING;
     *       'instanceId': STRING;
     *       'node': STRING;
     *       'processor': STRING;
     *  }
     * 
* * bool attach_application_metadata = 4; * * @param value The attachApplicationMetadata to set. * @return This builder for chaining. */ public Builder setAttachApplicationMetadata(boolean value) { attachApplicationMetadata_ = value; bitField0_ |= 0x00000008; onChanged(); return this; } /** * * *
     * If true, the prediction request received by the custom model will also contain
     * metadata with the following schema:
     * 'appPlatformMetadata': {
     *       'ingestionTime': DOUBLE; (UNIX timestamp)
     *       'application': STRING;
     *       'instanceId': STRING;
     *       'node': STRING;
     *       'processor': STRING;
     *  }
     * 
* * bool attach_application_metadata = 4; * * @return This builder for chaining. */ public Builder clearAttachApplicationMetadata() { bitField0_ = (bitField0_ & ~0x00000008); attachApplicationMetadata_ = false; onChanged(); return this; } private java.lang.Object dynamicConfigInputTopic_ = ""; /** * * *
     * Optional. If the configuration_input_topic is set, the processor subscribes
     * to the given topic; only Pub/Sub topics are supported for now. Example
     * channel:
     * //pubsub.googleapis.com/projects/visionai-testing-stable/topics/test-topic
     * The message schema should be:
     * message Message {
     *   // The ID of the stream associated with the application instance.
     *   string stream_id = 1;
     *   // The target fps. By default, the custom processor will *not* send any
     *   // data to the Vertex Prediction container. Note that once
     *   // dynamic_config_input_topic is set, max_prediction_fps no longer applies
     *   // and is superseded by the fps set inside the topic.
     *   int32 fps = 2;
     * }
     * 
* * * optional string dynamic_config_input_topic = 6 [(.google.api.field_behavior) = OPTIONAL]; * * * @return Whether the dynamicConfigInputTopic field is set. */ public boolean hasDynamicConfigInputTopic() { return ((bitField0_ & 0x00000010) != 0); } /** * * *
     * Optional. If the configuration_input_topic is set, the processor subscribes
     * to the given topic; only Pub/Sub topics are supported for now. Example
     * channel:
     * //pubsub.googleapis.com/projects/visionai-testing-stable/topics/test-topic
     * The message schema should be:
     * message Message {
     *   // The ID of the stream associated with the application instance.
     *   string stream_id = 1;
     *   // The target fps. By default, the custom processor will *not* send any
     *   // data to the Vertex Prediction container. Note that once
     *   // dynamic_config_input_topic is set, max_prediction_fps no longer applies
     *   // and is superseded by the fps set inside the topic.
     *   int32 fps = 2;
     * }
     * 
* * * optional string dynamic_config_input_topic = 6 [(.google.api.field_behavior) = OPTIONAL]; * * * @return The dynamicConfigInputTopic. */ public java.lang.String getDynamicConfigInputTopic() { java.lang.Object ref = dynamicConfigInputTopic_; if (!(ref instanceof java.lang.String)) { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); dynamicConfigInputTopic_ = s; return s; } else { return (java.lang.String) ref; } } /** * * *
     * Optional. If the configuration_input_topic is set, the processor subscribes
     * to the given topic; only Pub/Sub topics are supported for now. Example
     * channel:
     * //pubsub.googleapis.com/projects/visionai-testing-stable/topics/test-topic
     * The message schema should be:
     * message Message {
     *   // The ID of the stream associated with the application instance.
     *   string stream_id = 1;
     *   // The target fps. By default, the custom processor will *not* send any
     *   // data to the Vertex Prediction container. Note that once
     *   // dynamic_config_input_topic is set, max_prediction_fps no longer applies
     *   // and is superseded by the fps set inside the topic.
     *   int32 fps = 2;
     * }
     * 
* * * optional string dynamic_config_input_topic = 6 [(.google.api.field_behavior) = OPTIONAL]; * * * @return The bytes for dynamicConfigInputTopic. */ public com.google.protobuf.ByteString getDynamicConfigInputTopicBytes() { java.lang.Object ref = dynamicConfigInputTopic_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); dynamicConfigInputTopic_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } /** * * *
     * Optional. If the configuration_input_topic is set, the processor subscribes
     * to the given topic; only Pub/Sub topics are supported for now. Example
     * channel:
     * //pubsub.googleapis.com/projects/visionai-testing-stable/topics/test-topic
     * The message schema should be:
     * message Message {
     *   // The ID of the stream associated with the application instance.
     *   string stream_id = 1;
     *   // The target fps. By default, the custom processor will *not* send any
     *   // data to the Vertex Prediction container. Note that once
     *   // dynamic_config_input_topic is set, max_prediction_fps no longer applies
     *   // and is superseded by the fps set inside the topic.
     *   int32 fps = 2;
     * }
     * 
* * * optional string dynamic_config_input_topic = 6 [(.google.api.field_behavior) = OPTIONAL]; * * * @param value The dynamicConfigInputTopic to set. * @return This builder for chaining. */ public Builder setDynamicConfigInputTopic(java.lang.String value) { if (value == null) { throw new NullPointerException(); } dynamicConfigInputTopic_ = value; bitField0_ |= 0x00000010; onChanged(); return this; } /** * * *
     * Optional. If the configuration_input_topic is set, the processor subscribes
     * to the given topic; only Pub/Sub topics are supported for now. Example
     * channel:
     * //pubsub.googleapis.com/projects/visionai-testing-stable/topics/test-topic
     * The message schema should be:
     * message Message {
     *   // The ID of the stream associated with the application instance.
     *   string stream_id = 1;
     *   // The target fps. By default, the custom processor will *not* send any
     *   // data to the Vertex Prediction container. Note that once
     *   // dynamic_config_input_topic is set, max_prediction_fps no longer applies
     *   // and is superseded by the fps set inside the topic.
     *   int32 fps = 2;
     * }
     * 
* * * optional string dynamic_config_input_topic = 6 [(.google.api.field_behavior) = OPTIONAL]; * * * @return This builder for chaining. */ public Builder clearDynamicConfigInputTopic() { dynamicConfigInputTopic_ = getDefaultInstance().getDynamicConfigInputTopic(); bitField0_ = (bitField0_ & ~0x00000010); onChanged(); return this; } /** * * *
     * Optional. If the configuration_input_topic is set, the processor subscribes
     * to the given topic; only Pub/Sub topics are supported for now. Example
     * channel:
     * //pubsub.googleapis.com/projects/visionai-testing-stable/topics/test-topic
     * The message schema should be:
     * message Message {
     *   // The ID of the stream associated with the application instance.
     *   string stream_id = 1;
     *   // The target fps. By default, the custom processor will *not* send any
     *   // data to the Vertex Prediction container. Note that once
     *   // dynamic_config_input_topic is set, max_prediction_fps no longer applies
     *   // and is superseded by the fps set inside the topic.
     *   int32 fps = 2;
     * }
     * 
* * * optional string dynamic_config_input_topic = 6 [(.google.api.field_behavior) = OPTIONAL]; * * * @param value The bytes for dynamicConfigInputTopic to set. * @return This builder for chaining. */ public Builder setDynamicConfigInputTopicBytes(com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } checkByteStringIsUtf8(value); dynamicConfigInputTopic_ = value; bitField0_ |= 0x00000010; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final com.google.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:google.cloud.visionai.v1.VertexCustomConfig) } // @@protoc_insertion_point(class_scope:google.cloud.visionai.v1.VertexCustomConfig) private static final com.google.cloud.visionai.v1.VertexCustomConfig DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new com.google.cloud.visionai.v1.VertexCustomConfig(); } public static com.google.cloud.visionai.v1.VertexCustomConfig getDefaultInstance() { return DEFAULT_INSTANCE; } private static final com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { @java.lang.Override public VertexCustomConfig parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { Builder builder = newBuilder(); try { builder.mergeFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(builder.buildPartial()); } catch (com.google.protobuf.UninitializedMessageException e) { throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException(e) .setUnfinishedMessage(builder.buildPartial()); } return builder.buildPartial(); } }; public static com.google.protobuf.Parser parser() { return PARSER; } @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public com.google.cloud.visionai.v1.VertexCustomConfig getDefaultInstanceForType() { return DEFAULT_INSTANCE; } }
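
A minimal end-to-end usage sketch of the generated API above. The class name,
fps value, and Cloud Function name are illustrative placeholders; the Pub/Sub
channel is the example from the field documentation, and the DedicatedResources
message is left empty because its fields are not shown in this file.

// VertexCustomConfigSketch.java -- hypothetical example, not part of the generated library.
import com.google.cloud.visionai.v1.DedicatedResources;
import com.google.cloud.visionai.v1.VertexCustomConfig;
import com.google.protobuf.InvalidProtocolBufferException;

public class VertexCustomConfigSketch {
  public static void main(String[] args) throws InvalidProtocolBufferException {
    VertexCustomConfig config =
        VertexCustomConfig.newBuilder()
            // Cap prediction requests at 5 fps (0 means no limit).
            .setMaxPredictionFps(5)
            // Dedicated serving resources; left empty here as a placeholder.
            .setDedicatedResources(DedicatedResources.newBuilder().build())
            // Placeholder Cloud Function resource name for post-processing.
            .setPostProcessingCloudFunction(
                "projects/my-project/locations/us-central1/functions/post-process")
            // Attach the appPlatformMetadata block to each prediction request.
            .setAttachApplicationMetadata(true)
            // Example Pub/Sub channel from the field documentation.
            .setDynamicConfigInputTopic(
                "//pubsub.googleapis.com/projects/visionai-testing-stable/topics/test-topic")
            .build();

    // Round-trip through the wire format using the parseFrom(byte[]) overload defined above.
    byte[] wire = config.toByteArray();
    VertexCustomConfig parsed = VertexCustomConfig.parseFrom(wire);
    System.out.println("max_prediction_fps = " + parsed.getMaxPredictionFps());
    System.out.println("has dedicated_resources = " + parsed.hasDedicatedResources());
  }
}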



