com.google.cloud.automl.v1.BatchPredictRequest

/*
 * Copyright 2024 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: google/cloud/automl/v1/prediction_service.proto

// Protobuf Java Version: 3.25.4
package com.google.cloud.automl.v1;
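
// A minimal usage sketch for the generated API below, assuming the GCS-backed
// input/output configs of AutoML v1 (GcsSource / GcsDestination); the project,
// location, model ID and bucket names are placeholders, and "score_threshold"
// is one of the domain-specific entries documented on the params field.
final class BatchPredictRequestUsageSketch {

  private BatchPredictRequestUsageSketch() {}

  static BatchPredictRequest buildExampleRequest() {
    // Items to predict on, read from a CSV in Cloud Storage.
    BatchPredictInputConfig inputConfig =
        BatchPredictInputConfig.newBuilder()
            .setGcsSource(
                GcsSource.newBuilder().addInputUris("gs://example-bucket/batch_input.csv"))
            .build();

    // Where the batch prediction results should be written.
    BatchPredictOutputConfig outputConfig =
        BatchPredictOutputConfig.newBuilder()
            .setGcsDestination(
                GcsDestination.newBuilder().setOutputUriPrefix("gs://example-bucket/batch_output/"))
            .build();

    // Assemble the request with the builder generated below.
    return BatchPredictRequest.newBuilder()
        .setName("projects/example-project/locations/us-central1/models/MODEL_ID")
        .setInputConfig(inputConfig)
        .setOutputConfig(outputConfig)
        .putParams("score_threshold", "0.5")
        .build();
  }
}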

/**
 *
 *
 * 
 * Request message for [PredictionService.BatchPredict][google.cloud.automl.v1.PredictionService.BatchPredict].
 * 
 *
 * Protobuf type {@code google.cloud.automl.v1.BatchPredictRequest}
 */
public final class BatchPredictRequest extends com.google.protobuf.GeneratedMessageV3
    implements
    // @@protoc_insertion_point(message_implements:google.cloud.automl.v1.BatchPredictRequest)
    BatchPredictRequestOrBuilder {
  private static final long serialVersionUID = 0L;

  // Use BatchPredictRequest.newBuilder() to construct.
  private BatchPredictRequest(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
    super(builder);
  }

  private BatchPredictRequest() {
    name_ = "";
  }

  @java.lang.Override
  @SuppressWarnings({"unused"})
  protected java.lang.Object newInstance(UnusedPrivateParameter unused) {
    return new BatchPredictRequest();
  }

  public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
    return com.google.cloud.automl.v1.PredictionServiceProto
        .internal_static_google_cloud_automl_v1_BatchPredictRequest_descriptor;
  }

  @SuppressWarnings({"rawtypes"})
  @java.lang.Override
  protected com.google.protobuf.MapFieldReflectionAccessor internalGetMapFieldReflection(
      int number) {
    switch (number) {
      case 5:
        return internalGetParams();
      default:
        throw new RuntimeException("Invalid map field number: " + number);
    }
  }

  @java.lang.Override
  protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
      internalGetFieldAccessorTable() {
    return com.google.cloud.automl.v1.PredictionServiceProto
        .internal_static_google_cloud_automl_v1_BatchPredictRequest_fieldAccessorTable
        .ensureFieldAccessorsInitialized(
            com.google.cloud.automl.v1.BatchPredictRequest.class,
            com.google.cloud.automl.v1.BatchPredictRequest.Builder.class);
  }

  private int bitField0_;
  public static final int NAME_FIELD_NUMBER = 1;

  @SuppressWarnings("serial")
  private volatile java.lang.Object name_ = "";

  /**
   *
   *
   *
   * Required. Name of the model requested to serve the batch prediction.
   * 
   *
   *
   * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... }
   *
   *
   * @return The name.
   */
  @java.lang.Override
  public java.lang.String getName() {
    java.lang.Object ref = name_;
    if (ref instanceof java.lang.String) {
      return (java.lang.String) ref;
    } else {
      com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
      java.lang.String s = bs.toStringUtf8();
      name_ = s;
      return s;
    }
  }

  /**
   *
   *
   *
   * Required. Name of the model requested to serve the batch prediction.
   * 
   *
   *
   * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... }
   *
   *
   * @return The bytes for name.
   */
  @java.lang.Override
  public com.google.protobuf.ByteString getNameBytes() {
    java.lang.Object ref = name_;
    if (ref instanceof java.lang.String) {
      com.google.protobuf.ByteString b =
          com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
      name_ = b;
      return b;
    } else {
      return (com.google.protobuf.ByteString) ref;
    }
  }

  public static final int INPUT_CONFIG_FIELD_NUMBER = 3;
  private com.google.cloud.automl.v1.BatchPredictInputConfig inputConfig_;

  /**
   *
   *
   *
   * Required. The input configuration for batch prediction.
   * 
   *
   *
   * .google.cloud.automl.v1.BatchPredictInputConfig input_config = 3 [(.google.api.field_behavior) = REQUIRED];
   *
   *
   * @return Whether the inputConfig field is set.
   */
  @java.lang.Override
  public boolean hasInputConfig() {
    return ((bitField0_ & 0x00000001) != 0);
  }

  /**
   *
   *
   *
   * Required. The input configuration for batch prediction.
   * 
   *
   *
   * .google.cloud.automl.v1.BatchPredictInputConfig input_config = 3 [(.google.api.field_behavior) = REQUIRED];
   *
   *
   * @return The inputConfig.
   */
  @java.lang.Override
  public com.google.cloud.automl.v1.BatchPredictInputConfig getInputConfig() {
    return inputConfig_ == null
        ? com.google.cloud.automl.v1.BatchPredictInputConfig.getDefaultInstance()
        : inputConfig_;
  }

  /**
   *
   *
   *
   * Required. The input configuration for batch prediction.
   * 
   *
   *
   * .google.cloud.automl.v1.BatchPredictInputConfig input_config = 3 [(.google.api.field_behavior) = REQUIRED];
   *
   */
  @java.lang.Override
  public com.google.cloud.automl.v1.BatchPredictInputConfigOrBuilder getInputConfigOrBuilder() {
    return inputConfig_ == null
        ? com.google.cloud.automl.v1.BatchPredictInputConfig.getDefaultInstance()
        : inputConfig_;
  }

  public static final int OUTPUT_CONFIG_FIELD_NUMBER = 4;
  private com.google.cloud.automl.v1.BatchPredictOutputConfig outputConfig_;

  /**
   *
   *
   *
   * Required. The Configuration specifying where output predictions should
   * be written.
   * 
* * * .google.cloud.automl.v1.BatchPredictOutputConfig output_config = 4 [(.google.api.field_behavior) = REQUIRED]; * * * @return Whether the outputConfig field is set. */ @java.lang.Override public boolean hasOutputConfig() { return ((bitField0_ & 0x00000002) != 0); } /** * * *
   * Required. The Configuration specifying where output predictions should
   * be written.
   * 
* * * .google.cloud.automl.v1.BatchPredictOutputConfig output_config = 4 [(.google.api.field_behavior) = REQUIRED]; * * * @return The outputConfig. */ @java.lang.Override public com.google.cloud.automl.v1.BatchPredictOutputConfig getOutputConfig() { return outputConfig_ == null ? com.google.cloud.automl.v1.BatchPredictOutputConfig.getDefaultInstance() : outputConfig_; } /** * * *
   * Required. The Configuration specifying where output predictions should
   * be written.
   * 
* * * .google.cloud.automl.v1.BatchPredictOutputConfig output_config = 4 [(.google.api.field_behavior) = REQUIRED]; * */ @java.lang.Override public com.google.cloud.automl.v1.BatchPredictOutputConfigOrBuilder getOutputConfigOrBuilder() { return outputConfig_ == null ? com.google.cloud.automl.v1.BatchPredictOutputConfig.getDefaultInstance() : outputConfig_; } public static final int PARAMS_FIELD_NUMBER = 5; private static final class ParamsDefaultEntryHolder { static final com.google.protobuf.MapEntry defaultEntry = com.google.protobuf.MapEntry.newDefaultInstance( com.google.cloud.automl.v1.PredictionServiceProto .internal_static_google_cloud_automl_v1_BatchPredictRequest_ParamsEntry_descriptor, com.google.protobuf.WireFormat.FieldType.STRING, "", com.google.protobuf.WireFormat.FieldType.STRING, ""); } @SuppressWarnings("serial") private com.google.protobuf.MapField params_; private com.google.protobuf.MapField internalGetParams() { if (params_ == null) { return com.google.protobuf.MapField.emptyMapField(ParamsDefaultEntryHolder.defaultEntry); } return params_; } public int getParamsCount() { return internalGetParams().getMap().size(); } /** * * *
   * Additional domain-specific parameters for the predictions, any string must
   * be up to 25000 characters long.
   *
   * AutoML Natural Language Classification
   *
   * `score_threshold`
   * : (float) A value from 0.0 to 1.0. When the model
   *   makes predictions for a text snippet, it will only produce results
   *   that have at least this confidence score. The default is 0.5.
   *
   *
   * AutoML Vision Classification
   *
   * `score_threshold`
   * : (float) A value from 0.0 to 1.0. When the model
   *   makes predictions for an image, it will only produce results that
   *   have at least this confidence score. The default is 0.5.
   *
   * AutoML Vision Object Detection
   *
   * `score_threshold`
   * : (float) When Model detects objects on the image,
   *   it will only produce bounding boxes which have at least this
   *   confidence score. Value in 0 to 1 range, default is 0.5.
   *
   * `max_bounding_box_count`
   * : (int64) The maximum number of bounding
   *   boxes returned per image. The default is 100, the
   *   number of bounding boxes returned might be limited by the server.
   * AutoML Video Intelligence Classification
   *
   * `score_threshold`
   * : (float) A value from 0.0 to 1.0. When the model
   *   makes predictions for a video, it will only produce results that
   *   have at least this confidence score. The default is 0.5.
   *
   * `segment_classification`
   * : (boolean) Set to true to request
   *   segment-level classification. AutoML Video Intelligence returns
   *   labels and their confidence scores for the entire segment of the
   *   video that user specified in the request configuration.
   *   The default is true.
   *
   * `shot_classification`
   * : (boolean) Set to true to request shot-level
   *   classification. AutoML Video Intelligence determines the boundaries
   *   for each camera shot in the entire segment of the video that user
   *   specified in the request configuration. AutoML Video Intelligence
   *   then returns labels and their confidence scores for each detected
   *   shot, along with the start and end time of the shot.
   *   The default is false.
   *
   *   WARNING: Model evaluation is not done for this classification type,
   *   the quality of it depends on training data, but there are no metrics
   *   provided to describe that quality.
   *
   * `1s_interval_classification`
   * : (boolean) Set to true to request
   *   classification for a video at one-second intervals. AutoML Video
   *   Intelligence returns labels and their confidence scores for each
   *   second of the entire segment of the video that user specified in the
   *   request configuration. The default is false.
   *
   *   WARNING: Model evaluation is not done for this classification
   *   type, the quality of it depends on training data, but there are no
   *   metrics provided to describe that quality.
   *
   * AutoML Video Intelligence Object Tracking
   *
   * `score_threshold`
   * : (float) When Model detects objects on video frames,
   *   it will only produce bounding boxes which have at least this
   *   confidence score. Value in 0 to 1 range, default is 0.5.
   *
   * `max_bounding_box_count`
   * : (int64) The maximum number of bounding
   *   boxes returned per image. The default is 100, the
   *   number of bounding boxes returned might be limited by the server.
   *
   * `min_bounding_box_size`
   * : (float) Only bounding boxes with shortest edge
   *   at least that long as a relative value of video frame size are
   *   returned. Value in 0 to 1 range. Default is 0.
   * 
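   *
   * As an illustrative sketch only: for a hypothetical Vision Object Detection
   * model, these entries could be set on a request builder via, e.g.,
   * `putParams("score_threshold", "0.7")` and
   * `putParams("max_bounding_box_count", "50")`.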
* * map<string, string> params = 5; */ @java.lang.Override public boolean containsParams(java.lang.String key) { if (key == null) { throw new NullPointerException("map key"); } return internalGetParams().getMap().containsKey(key); } /** Use {@link #getParamsMap()} instead. */ @java.lang.Override @java.lang.Deprecated public java.util.Map getParams() { return getParamsMap(); } /** * * *
   * Additional domain-specific parameters for the predictions, any string must
   * be up to 25000 characters long.
   *
   * AutoML Natural Language Classification
   *
   * `score_threshold`
   * : (float) A value from 0.0 to 1.0. When the model
   *   makes predictions for a text snippet, it will only produce results
   *   that have at least this confidence score. The default is 0.5.
   *
   *
   * AutoML Vision Classification
   *
   * `score_threshold`
   * : (float) A value from 0.0 to 1.0. When the model
   *   makes predictions for an image, it will only produce results that
   *   have at least this confidence score. The default is 0.5.
   *
   * AutoML Vision Object Detection
   *
   * `score_threshold`
   * : (float) When Model detects objects on the image,
   *   it will only produce bounding boxes which have at least this
   *   confidence score. Value in 0 to 1 range, default is 0.5.
   *
   * `max_bounding_box_count`
   * : (int64) The maximum number of bounding
   *   boxes returned per image. The default is 100, the
   *   number of bounding boxes returned might be limited by the server.
   * AutoML Video Intelligence Classification
   *
   * `score_threshold`
   * : (float) A value from 0.0 to 1.0. When the model
   *   makes predictions for a video, it will only produce results that
   *   have at least this confidence score. The default is 0.5.
   *
   * `segment_classification`
   * : (boolean) Set to true to request
   *   segment-level classification. AutoML Video Intelligence returns
   *   labels and their confidence scores for the entire segment of the
   *   video that user specified in the request configuration.
   *   The default is true.
   *
   * `shot_classification`
   * : (boolean) Set to true to request shot-level
   *   classification. AutoML Video Intelligence determines the boundaries
   *   for each camera shot in the entire segment of the video that user
   *   specified in the request configuration. AutoML Video Intelligence
   *   then returns labels and their confidence scores for each detected
   *   shot, along with the start and end time of the shot.
   *   The default is false.
   *
   *   WARNING: Model evaluation is not done for this classification type,
   *   the quality of it depends on training data, but there are no metrics
   *   provided to describe that quality.
   *
   * `1s_interval_classification`
   * : (boolean) Set to true to request
   *   classification for a video at one-second intervals. AutoML Video
   *   Intelligence returns labels and their confidence scores for each
   *   second of the entire segment of the video that user specified in the
   *   request configuration. The default is false.
   *
   *   WARNING: Model evaluation is not done for this classification
   *   type, the quality of it depends on training data, but there are no
   *   metrics provided to describe that quality.
   *
   * AutoML Video Intelligence Object Tracking
   *
   * `score_threshold`
   * : (float) When Model detects objects on video frames,
   *   it will only produce bounding boxes which have at least this
   *   confidence score. Value in 0 to 1 range, default is 0.5.
   *
   * `max_bounding_box_count`
   * : (int64) The maximum number of bounding
   *   boxes returned per image. The default is 100, the
   *   number of bounding boxes returned might be limited by the server.
   *
   * `min_bounding_box_size`
   * : (float) Only bounding boxes with shortest edge
   *   at least that long as a relative value of video frame size are
   *   returned. Value in 0 to 1 range. Default is 0.
   * 
* * map<string, string> params = 5; */ @java.lang.Override public java.util.Map getParamsMap() { return internalGetParams().getMap(); } /** * * *
   * Additional domain-specific parameters for the predictions, any string must
   * be up to 25000 characters long.
   *
   * AutoML Natural Language Classification
   *
   * `score_threshold`
   * : (float) A value from 0.0 to 1.0. When the model
   *   makes predictions for a text snippet, it will only produce results
   *   that have at least this confidence score. The default is 0.5.
   *
   *
   * AutoML Vision Classification
   *
   * `score_threshold`
   * : (float) A value from 0.0 to 1.0. When the model
   *   makes predictions for an image, it will only produce results that
   *   have at least this confidence score. The default is 0.5.
   *
   * AutoML Vision Object Detection
   *
   * `score_threshold`
   * : (float) When Model detects objects on the image,
   *   it will only produce bounding boxes which have at least this
   *   confidence score. Value in 0 to 1 range, default is 0.5.
   *
   * `max_bounding_box_count`
   * : (int64) The maximum number of bounding
   *   boxes returned per image. The default is 100, the
   *   number of bounding boxes returned might be limited by the server.
   * AutoML Video Intelligence Classification
   *
   * `score_threshold`
   * : (float) A value from 0.0 to 1.0. When the model
   *   makes predictions for a video, it will only produce results that
   *   have at least this confidence score. The default is 0.5.
   *
   * `segment_classification`
   * : (boolean) Set to true to request
   *   segment-level classification. AutoML Video Intelligence returns
   *   labels and their confidence scores for the entire segment of the
   *   video that user specified in the request configuration.
   *   The default is true.
   *
   * `shot_classification`
   * : (boolean) Set to true to request shot-level
   *   classification. AutoML Video Intelligence determines the boundaries
   *   for each camera shot in the entire segment of the video that user
   *   specified in the request configuration. AutoML Video Intelligence
   *   then returns labels and their confidence scores for each detected
   *   shot, along with the start and end time of the shot.
   *   The default is false.
   *
   *   WARNING: Model evaluation is not done for this classification type,
   *   the quality of it depends on training data, but there are no metrics
   *   provided to describe that quality.
   *
   * `1s_interval_classification`
   * : (boolean) Set to true to request
   *   classification for a video at one-second intervals. AutoML Video
   *   Intelligence returns labels and their confidence scores for each
   *   second of the entire segment of the video that user specified in the
   *   request configuration. The default is false.
   *
   *   WARNING: Model evaluation is not done for this classification
   *   type, the quality of it depends on training data, but there are no
   *   metrics provided to describe that quality.
   *
   * AutoML Video Intelligence Object Tracking
   *
   * `score_threshold`
   * : (float) When Model detects objects on video frames,
   *   it will only produce bounding boxes which have at least this
   *   confidence score. Value in 0 to 1 range, default is 0.5.
   *
   * `max_bounding_box_count`
   * : (int64) The maximum number of bounding
   *   boxes returned per image. The default is 100, the
   *   number of bounding boxes returned might be limited by the server.
   *
   * `min_bounding_box_size`
   * : (float) Only bounding boxes with shortest edge
   *   at least that long as a relative value of video frame size are
   *   returned. Value in 0 to 1 range. Default is 0.
   * 
* * map<string, string> params = 5; */ @java.lang.Override public /* nullable */ java.lang.String getParamsOrDefault( java.lang.String key, /* nullable */ java.lang.String defaultValue) { if (key == null) { throw new NullPointerException("map key"); } java.util.Map map = internalGetParams().getMap(); return map.containsKey(key) ? map.get(key) : defaultValue; } /** * * *
   * Additional domain-specific parameters for the predictions, any string must
   * be up to 25000 characters long.
   *
   * AutoML Natural Language Classification
   *
   * `score_threshold`
   * : (float) A value from 0.0 to 1.0. When the model
   *   makes predictions for a text snippet, it will only produce results
   *   that have at least this confidence score. The default is 0.5.
   *
   *
   * AutoML Vision Classification
   *
   * `score_threshold`
   * : (float) A value from 0.0 to 1.0. When the model
   *   makes predictions for an image, it will only produce results that
   *   have at least this confidence score. The default is 0.5.
   *
   * AutoML Vision Object Detection
   *
   * `score_threshold`
   * : (float) When Model detects objects on the image,
   *   it will only produce bounding boxes which have at least this
   *   confidence score. Value in 0 to 1 range, default is 0.5.
   *
   * `max_bounding_box_count`
   * : (int64) The maximum number of bounding
   *   boxes returned per image. The default is 100, the
   *   number of bounding boxes returned might be limited by the server.
   * AutoML Video Intelligence Classification
   *
   * `score_threshold`
   * : (float) A value from 0.0 to 1.0. When the model
   *   makes predictions for a video, it will only produce results that
   *   have at least this confidence score. The default is 0.5.
   *
   * `segment_classification`
   * : (boolean) Set to true to request
   *   segment-level classification. AutoML Video Intelligence returns
   *   labels and their confidence scores for the entire segment of the
   *   video that user specified in the request configuration.
   *   The default is true.
   *
   * `shot_classification`
   * : (boolean) Set to true to request shot-level
   *   classification. AutoML Video Intelligence determines the boundaries
   *   for each camera shot in the entire segment of the video that user
   *   specified in the request configuration. AutoML Video Intelligence
   *   then returns labels and their confidence scores for each detected
   *   shot, along with the start and end time of the shot.
   *   The default is false.
   *
   *   WARNING: Model evaluation is not done for this classification type,
   *   the quality of it depends on training data, but there are no metrics
   *   provided to describe that quality.
   *
   * `1s_interval_classification`
   * : (boolean) Set to true to request
   *   classification for a video at one-second intervals. AutoML Video
   *   Intelligence returns labels and their confidence scores for each
   *   second of the entire segment of the video that user specified in the
   *   request configuration. The default is false.
   *
   *   WARNING: Model evaluation is not done for this classification
   *   type, the quality of it depends on training data, but there are no
   *   metrics provided to describe that quality.
   *
   * AutoML Video Intelligence Object Tracking
   *
   * `score_threshold`
   * : (float) When Model detects objects on video frames,
   *   it will only produce bounding boxes which have at least this
   *   confidence score. Value in 0 to 1 range, default is 0.5.
   *
   * `max_bounding_box_count`
   * : (int64) The maximum number of bounding
   *   boxes returned per image. The default is 100, the
   *   number of bounding boxes returned might be limited by the server.
   *
   * `min_bounding_box_size`
   * : (float) Only bounding boxes with shortest edge
   *   at least that long as a relative value of video frame size are
   *   returned. Value in 0 to 1 range. Default is 0.
   * 
* * map<string, string> params = 5; */ @java.lang.Override public java.lang.String getParamsOrThrow(java.lang.String key) { if (key == null) { throw new NullPointerException("map key"); } java.util.Map map = internalGetParams().getMap(); if (!map.containsKey(key)) { throw new java.lang.IllegalArgumentException(); } return map.get(key); } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { com.google.protobuf.GeneratedMessageV3.writeString(output, 1, name_); } if (((bitField0_ & 0x00000001) != 0)) { output.writeMessage(3, getInputConfig()); } if (((bitField0_ & 0x00000002) != 0)) { output.writeMessage(4, getOutputConfig()); } com.google.protobuf.GeneratedMessageV3.serializeStringMapTo( output, internalGetParams(), ParamsDefaultEntryHolder.defaultEntry, 5); getUnknownFields().writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, name_); } if (((bitField0_ & 0x00000001) != 0)) { size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getInputConfig()); } if (((bitField0_ & 0x00000002) != 0)) { size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, getOutputConfig()); } for (java.util.Map.Entry entry : internalGetParams().getMap().entrySet()) { com.google.protobuf.MapEntry params__ = ParamsDefaultEntryHolder.defaultEntry .newBuilderForType() .setKey(entry.getKey()) .setValue(entry.getValue()) .build(); size += com.google.protobuf.CodedOutputStream.computeMessageSize(5, params__); } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof com.google.cloud.automl.v1.BatchPredictRequest)) { return super.equals(obj); } com.google.cloud.automl.v1.BatchPredictRequest other = (com.google.cloud.automl.v1.BatchPredictRequest) obj; if (!getName().equals(other.getName())) return false; if (hasInputConfig() != other.hasInputConfig()) return false; if (hasInputConfig()) { if (!getInputConfig().equals(other.getInputConfig())) return false; } if (hasOutputConfig() != other.hasOutputConfig()) return false; if (hasOutputConfig()) { if (!getOutputConfig().equals(other.getOutputConfig())) return false; } if (!internalGetParams().equals(other.internalGetParams())) return false; if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); hash = (37 * hash) + NAME_FIELD_NUMBER; hash = (53 * hash) + getName().hashCode(); if (hasInputConfig()) { hash = (37 * hash) + INPUT_CONFIG_FIELD_NUMBER; hash = (53 * hash) + getInputConfig().hashCode(); } if (hasOutputConfig()) { hash = (37 * hash) + OUTPUT_CONFIG_FIELD_NUMBER; hash = (53 * hash) + getOutputConfig().hashCode(); } if (!internalGetParams().getMap().isEmpty()) { hash = (37 * hash) + PARAMS_FIELD_NUMBER; 
hash = (53 * hash) + internalGetParams().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static com.google.cloud.automl.v1.BatchPredictRequest parseFrom(java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static com.google.cloud.automl.v1.BatchPredictRequest parseFrom( java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static com.google.cloud.automl.v1.BatchPredictRequest parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static com.google.cloud.automl.v1.BatchPredictRequest parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static com.google.cloud.automl.v1.BatchPredictRequest parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static com.google.cloud.automl.v1.BatchPredictRequest parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static com.google.cloud.automl.v1.BatchPredictRequest parseFrom(java.io.InputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); } public static com.google.cloud.automl.v1.BatchPredictRequest parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseWithIOException( PARSER, input, extensionRegistry); } public static com.google.cloud.automl.v1.BatchPredictRequest parseDelimitedFrom( java.io.InputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); } public static com.google.cloud.automl.v1.BatchPredictRequest parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( PARSER, input, extensionRegistry); } public static com.google.cloud.automl.v1.BatchPredictRequest parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); } public static com.google.cloud.automl.v1.BatchPredictRequest parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseWithIOException( PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(com.google.cloud.automl.v1.BatchPredictRequest prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * * *
   * Request message for [PredictionService.BatchPredict][google.cloud.automl.v1.PredictionService.BatchPredict].
   * 
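   *
   * For instance (illustrative only), an existing request can be adjusted with
   * `request.toBuilder().putParams("score_threshold", "0.7").build()`.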
* * Protobuf type {@code google.cloud.automl.v1.BatchPredictRequest} */ public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:google.cloud.automl.v1.BatchPredictRequest) com.google.cloud.automl.v1.BatchPredictRequestOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return com.google.cloud.automl.v1.PredictionServiceProto .internal_static_google_cloud_automl_v1_BatchPredictRequest_descriptor; } @SuppressWarnings({"rawtypes"}) protected com.google.protobuf.MapFieldReflectionAccessor internalGetMapFieldReflection( int number) { switch (number) { case 5: return internalGetParams(); default: throw new RuntimeException("Invalid map field number: " + number); } } @SuppressWarnings({"rawtypes"}) protected com.google.protobuf.MapFieldReflectionAccessor internalGetMutableMapFieldReflection( int number) { switch (number) { case 5: return internalGetMutableParams(); default: throw new RuntimeException("Invalid map field number: " + number); } } @java.lang.Override protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return com.google.cloud.automl.v1.PredictionServiceProto .internal_static_google_cloud_automl_v1_BatchPredictRequest_fieldAccessorTable .ensureFieldAccessorsInitialized( com.google.cloud.automl.v1.BatchPredictRequest.class, com.google.cloud.automl.v1.BatchPredictRequest.Builder.class); } // Construct using com.google.cloud.automl.v1.BatchPredictRequest.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { getInputConfigFieldBuilder(); getOutputConfigFieldBuilder(); } } @java.lang.Override public Builder clear() { super.clear(); bitField0_ = 0; name_ = ""; inputConfig_ = null; if (inputConfigBuilder_ != null) { inputConfigBuilder_.dispose(); inputConfigBuilder_ = null; } outputConfig_ = null; if (outputConfigBuilder_ != null) { outputConfigBuilder_.dispose(); outputConfigBuilder_ = null; } internalGetMutableParams().clear(); return this; } @java.lang.Override public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return com.google.cloud.automl.v1.PredictionServiceProto .internal_static_google_cloud_automl_v1_BatchPredictRequest_descriptor; } @java.lang.Override public com.google.cloud.automl.v1.BatchPredictRequest getDefaultInstanceForType() { return com.google.cloud.automl.v1.BatchPredictRequest.getDefaultInstance(); } @java.lang.Override public com.google.cloud.automl.v1.BatchPredictRequest build() { com.google.cloud.automl.v1.BatchPredictRequest result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public com.google.cloud.automl.v1.BatchPredictRequest buildPartial() { com.google.cloud.automl.v1.BatchPredictRequest result = new com.google.cloud.automl.v1.BatchPredictRequest(this); if (bitField0_ != 0) { buildPartial0(result); } onBuilt(); return result; } private void buildPartial0(com.google.cloud.automl.v1.BatchPredictRequest result) { int from_bitField0_ = bitField0_; if (((from_bitField0_ & 0x00000001) != 0)) { result.name_ = name_; } int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000002) != 0)) { result.inputConfig_ = 
inputConfigBuilder_ == null ? inputConfig_ : inputConfigBuilder_.build(); to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000004) != 0)) { result.outputConfig_ = outputConfigBuilder_ == null ? outputConfig_ : outputConfigBuilder_.build(); to_bitField0_ |= 0x00000002; } if (((from_bitField0_ & 0x00000008) != 0)) { result.params_ = internalGetParams(); result.params_.makeImmutable(); } result.bitField0_ |= to_bitField0_; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof com.google.cloud.automl.v1.BatchPredictRequest) { return mergeFrom((com.google.cloud.automl.v1.BatchPredictRequest) other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(com.google.cloud.automl.v1.BatchPredictRequest other) { if (other == com.google.cloud.automl.v1.BatchPredictRequest.getDefaultInstance()) return this; if (!other.getName().isEmpty()) { name_ = other.name_; bitField0_ |= 0x00000001; onChanged(); } if (other.hasInputConfig()) { mergeInputConfig(other.getInputConfig()); } if (other.hasOutputConfig()) { mergeOutputConfig(other.getOutputConfig()); } internalGetMutableParams().mergeFrom(other.internalGetParams()); bitField0_ |= 0x00000008; this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { return true; } @java.lang.Override public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { name_ = input.readStringRequireUtf8(); bitField0_ |= 0x00000001; break; } // case 10 case 26: { input.readMessage(getInputConfigFieldBuilder().getBuilder(), extensionRegistry); bitField0_ |= 0x00000002; break; } // case 26 case 34: { input.readMessage(getOutputConfigFieldBuilder().getBuilder(), extensionRegistry); bitField0_ |= 0x00000004; break; } // case 34 case 42: { com.google.protobuf.MapEntry params__ = input.readMessage( ParamsDefaultEntryHolder.defaultEntry.getParserForType(), extensionRegistry); internalGetMutableParams() .getMutableMap() .put(params__.getKey(), params__.getValue()); bitField0_ |= 0x00000008; break; } // case 42 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { done = true; // was an endgroup tag } break; } // default: } // switch (tag) } // while (!done) } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw 
e.unwrapIOException(); } finally { onChanged(); } // finally return this; } private int bitField0_; private java.lang.Object name_ = ""; /** * * *
     * Required. Name of the model requested to serve the batch prediction.
     * 
* * * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } * * * @return The name. */ public java.lang.String getName() { java.lang.Object ref = name_; if (!(ref instanceof java.lang.String)) { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); name_ = s; return s; } else { return (java.lang.String) ref; } } /** * * *
     * Required. Name of the model requested to serve the batch prediction.
     * 
* * * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } * * * @return The bytes for name. */ public com.google.protobuf.ByteString getNameBytes() { java.lang.Object ref = name_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); name_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } /** * * *
     * Required. Name of the model requested to serve the batch prediction.
     * 
* * * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } * * * @param value The name to set. * @return This builder for chaining. */ public Builder setName(java.lang.String value) { if (value == null) { throw new NullPointerException(); } name_ = value; bitField0_ |= 0x00000001; onChanged(); return this; } /** * * *
     * Required. Name of the model requested to serve the batch prediction.
     * 
* * * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } * * * @return This builder for chaining. */ public Builder clearName() { name_ = getDefaultInstance().getName(); bitField0_ = (bitField0_ & ~0x00000001); onChanged(); return this; } /** * * *
     * Required. Name of the model requested to serve the batch prediction.
     * 
* * * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } * * * @param value The bytes for name to set. * @return This builder for chaining. */ public Builder setNameBytes(com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } checkByteStringIsUtf8(value); name_ = value; bitField0_ |= 0x00000001; onChanged(); return this; } private com.google.cloud.automl.v1.BatchPredictInputConfig inputConfig_; private com.google.protobuf.SingleFieldBuilderV3< com.google.cloud.automl.v1.BatchPredictInputConfig, com.google.cloud.automl.v1.BatchPredictInputConfig.Builder, com.google.cloud.automl.v1.BatchPredictInputConfigOrBuilder> inputConfigBuilder_; /** * * *
     * Required. The input configuration for batch prediction.
     * 
* * * .google.cloud.automl.v1.BatchPredictInputConfig input_config = 3 [(.google.api.field_behavior) = REQUIRED]; * * * @return Whether the inputConfig field is set. */ public boolean hasInputConfig() { return ((bitField0_ & 0x00000002) != 0); } /** * * *
     * Required. The input configuration for batch prediction.
     * 
* * * .google.cloud.automl.v1.BatchPredictInputConfig input_config = 3 [(.google.api.field_behavior) = REQUIRED]; * * * @return The inputConfig. */ public com.google.cloud.automl.v1.BatchPredictInputConfig getInputConfig() { if (inputConfigBuilder_ == null) { return inputConfig_ == null ? com.google.cloud.automl.v1.BatchPredictInputConfig.getDefaultInstance() : inputConfig_; } else { return inputConfigBuilder_.getMessage(); } } /** * * *
     * Required. The input configuration for batch prediction.
     * 
* * * .google.cloud.automl.v1.BatchPredictInputConfig input_config = 3 [(.google.api.field_behavior) = REQUIRED]; * */ public Builder setInputConfig(com.google.cloud.automl.v1.BatchPredictInputConfig value) { if (inputConfigBuilder_ == null) { if (value == null) { throw new NullPointerException(); } inputConfig_ = value; } else { inputConfigBuilder_.setMessage(value); } bitField0_ |= 0x00000002; onChanged(); return this; } /** * * *
     * Required. The input configuration for batch prediction.
     * 
* * * .google.cloud.automl.v1.BatchPredictInputConfig input_config = 3 [(.google.api.field_behavior) = REQUIRED]; * */ public Builder setInputConfig( com.google.cloud.automl.v1.BatchPredictInputConfig.Builder builderForValue) { if (inputConfigBuilder_ == null) { inputConfig_ = builderForValue.build(); } else { inputConfigBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000002; onChanged(); return this; } /** * * *
     * Required. The input configuration for batch prediction.
     * 
* * * .google.cloud.automl.v1.BatchPredictInputConfig input_config = 3 [(.google.api.field_behavior) = REQUIRED]; * */ public Builder mergeInputConfig(com.google.cloud.automl.v1.BatchPredictInputConfig value) { if (inputConfigBuilder_ == null) { if (((bitField0_ & 0x00000002) != 0) && inputConfig_ != null && inputConfig_ != com.google.cloud.automl.v1.BatchPredictInputConfig.getDefaultInstance()) { getInputConfigBuilder().mergeFrom(value); } else { inputConfig_ = value; } } else { inputConfigBuilder_.mergeFrom(value); } if (inputConfig_ != null) { bitField0_ |= 0x00000002; onChanged(); } return this; } /** * * *
     * Required. The input configuration for batch prediction.
     * 
* * * .google.cloud.automl.v1.BatchPredictInputConfig input_config = 3 [(.google.api.field_behavior) = REQUIRED]; * */ public Builder clearInputConfig() { bitField0_ = (bitField0_ & ~0x00000002); inputConfig_ = null; if (inputConfigBuilder_ != null) { inputConfigBuilder_.dispose(); inputConfigBuilder_ = null; } onChanged(); return this; } /** * * *
     * Required. The input configuration for batch prediction.
     * 
* * * .google.cloud.automl.v1.BatchPredictInputConfig input_config = 3 [(.google.api.field_behavior) = REQUIRED]; * */ public com.google.cloud.automl.v1.BatchPredictInputConfig.Builder getInputConfigBuilder() { bitField0_ |= 0x00000002; onChanged(); return getInputConfigFieldBuilder().getBuilder(); } /** * * *
     * Required. The input configuration for batch prediction.
     * 
* * * .google.cloud.automl.v1.BatchPredictInputConfig input_config = 3 [(.google.api.field_behavior) = REQUIRED]; * */ public com.google.cloud.automl.v1.BatchPredictInputConfigOrBuilder getInputConfigOrBuilder() { if (inputConfigBuilder_ != null) { return inputConfigBuilder_.getMessageOrBuilder(); } else { return inputConfig_ == null ? com.google.cloud.automl.v1.BatchPredictInputConfig.getDefaultInstance() : inputConfig_; } } /** * * *
     * Required. The input configuration for batch prediction.
     * 
* * * .google.cloud.automl.v1.BatchPredictInputConfig input_config = 3 [(.google.api.field_behavior) = REQUIRED]; * */ private com.google.protobuf.SingleFieldBuilderV3< com.google.cloud.automl.v1.BatchPredictInputConfig, com.google.cloud.automl.v1.BatchPredictInputConfig.Builder, com.google.cloud.automl.v1.BatchPredictInputConfigOrBuilder> getInputConfigFieldBuilder() { if (inputConfigBuilder_ == null) { inputConfigBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< com.google.cloud.automl.v1.BatchPredictInputConfig, com.google.cloud.automl.v1.BatchPredictInputConfig.Builder, com.google.cloud.automl.v1.BatchPredictInputConfigOrBuilder>( getInputConfig(), getParentForChildren(), isClean()); inputConfig_ = null; } return inputConfigBuilder_; } private com.google.cloud.automl.v1.BatchPredictOutputConfig outputConfig_; private com.google.protobuf.SingleFieldBuilderV3< com.google.cloud.automl.v1.BatchPredictOutputConfig, com.google.cloud.automl.v1.BatchPredictOutputConfig.Builder, com.google.cloud.automl.v1.BatchPredictOutputConfigOrBuilder> outputConfigBuilder_; /** * * *
     * Required. The Configuration specifying where output predictions should
     * be written.
     * 
* * * .google.cloud.automl.v1.BatchPredictOutputConfig output_config = 4 [(.google.api.field_behavior) = REQUIRED]; * * * @return Whether the outputConfig field is set. */ public boolean hasOutputConfig() { return ((bitField0_ & 0x00000004) != 0); } /** * * *
     * Required. The Configuration specifying where output predictions should
     * be written.
     * 
* * * .google.cloud.automl.v1.BatchPredictOutputConfig output_config = 4 [(.google.api.field_behavior) = REQUIRED]; * * * @return The outputConfig. */ public com.google.cloud.automl.v1.BatchPredictOutputConfig getOutputConfig() { if (outputConfigBuilder_ == null) { return outputConfig_ == null ? com.google.cloud.automl.v1.BatchPredictOutputConfig.getDefaultInstance() : outputConfig_; } else { return outputConfigBuilder_.getMessage(); } } /** * * *
     * Required. The Configuration specifying where output predictions should
     * be written.
     * 
* * * .google.cloud.automl.v1.BatchPredictOutputConfig output_config = 4 [(.google.api.field_behavior) = REQUIRED]; * */ public Builder setOutputConfig(com.google.cloud.automl.v1.BatchPredictOutputConfig value) { if (outputConfigBuilder_ == null) { if (value == null) { throw new NullPointerException(); } outputConfig_ = value; } else { outputConfigBuilder_.setMessage(value); } bitField0_ |= 0x00000004; onChanged(); return this; } /** * * *
     * Required. The Configuration specifying where output predictions should
     * be written.
     * 
* * * .google.cloud.automl.v1.BatchPredictOutputConfig output_config = 4 [(.google.api.field_behavior) = REQUIRED]; * */ public Builder setOutputConfig( com.google.cloud.automl.v1.BatchPredictOutputConfig.Builder builderForValue) { if (outputConfigBuilder_ == null) { outputConfig_ = builderForValue.build(); } else { outputConfigBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000004; onChanged(); return this; } /** * * *
     * Required. The Configuration specifying where output predictions should
     * be written.
     * 
* * * .google.cloud.automl.v1.BatchPredictOutputConfig output_config = 4 [(.google.api.field_behavior) = REQUIRED]; * */ public Builder mergeOutputConfig(com.google.cloud.automl.v1.BatchPredictOutputConfig value) { if (outputConfigBuilder_ == null) { if (((bitField0_ & 0x00000004) != 0) && outputConfig_ != null && outputConfig_ != com.google.cloud.automl.v1.BatchPredictOutputConfig.getDefaultInstance()) { getOutputConfigBuilder().mergeFrom(value); } else { outputConfig_ = value; } } else { outputConfigBuilder_.mergeFrom(value); } if (outputConfig_ != null) { bitField0_ |= 0x00000004; onChanged(); } return this; } /** * * *
     * Required. The Configuration specifying where output predictions should
     * be written.
     * 
* * * .google.cloud.automl.v1.BatchPredictOutputConfig output_config = 4 [(.google.api.field_behavior) = REQUIRED]; * */ public Builder clearOutputConfig() { bitField0_ = (bitField0_ & ~0x00000004); outputConfig_ = null; if (outputConfigBuilder_ != null) { outputConfigBuilder_.dispose(); outputConfigBuilder_ = null; } onChanged(); return this; } /** * * *
     * Required. The Configuration specifying where output predictions should
     * be written.
     * 
* * * .google.cloud.automl.v1.BatchPredictOutputConfig output_config = 4 [(.google.api.field_behavior) = REQUIRED]; * */ public com.google.cloud.automl.v1.BatchPredictOutputConfig.Builder getOutputConfigBuilder() { bitField0_ |= 0x00000004; onChanged(); return getOutputConfigFieldBuilder().getBuilder(); } /** * * *
     * Required. The Configuration specifying where output predictions should
     * be written.
     * 
* * * .google.cloud.automl.v1.BatchPredictOutputConfig output_config = 4 [(.google.api.field_behavior) = REQUIRED]; * */ public com.google.cloud.automl.v1.BatchPredictOutputConfigOrBuilder getOutputConfigOrBuilder() { if (outputConfigBuilder_ != null) { return outputConfigBuilder_.getMessageOrBuilder(); } else { return outputConfig_ == null ? com.google.cloud.automl.v1.BatchPredictOutputConfig.getDefaultInstance() : outputConfig_; } } /** * * *
     * Required. The Configuration specifying where output predictions should
     * be written.
     * 
* * * .google.cloud.automl.v1.BatchPredictOutputConfig output_config = 4 [(.google.api.field_behavior) = REQUIRED]; * */ private com.google.protobuf.SingleFieldBuilderV3< com.google.cloud.automl.v1.BatchPredictOutputConfig, com.google.cloud.automl.v1.BatchPredictOutputConfig.Builder, com.google.cloud.automl.v1.BatchPredictOutputConfigOrBuilder> getOutputConfigFieldBuilder() { if (outputConfigBuilder_ == null) { outputConfigBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< com.google.cloud.automl.v1.BatchPredictOutputConfig, com.google.cloud.automl.v1.BatchPredictOutputConfig.Builder, com.google.cloud.automl.v1.BatchPredictOutputConfigOrBuilder>( getOutputConfig(), getParentForChildren(), isClean()); outputConfig_ = null; } return outputConfigBuilder_; } private com.google.protobuf.MapField params_; private com.google.protobuf.MapField internalGetParams() { if (params_ == null) { return com.google.protobuf.MapField.emptyMapField(ParamsDefaultEntryHolder.defaultEntry); } return params_; } private com.google.protobuf.MapField internalGetMutableParams() { if (params_ == null) { params_ = com.google.protobuf.MapField.newMapField(ParamsDefaultEntryHolder.defaultEntry); } if (!params_.isMutable()) { params_ = params_.copy(); } bitField0_ |= 0x00000008; onChanged(); return params_; } public int getParamsCount() { return internalGetParams().getMap().size(); } /** * * *
     * Additional domain-specific parameters for the predictions, any string must
     * be up to 25000 characters long.
     *
     * AutoML Natural Language Classification
     *
     * `score_threshold`
     * : (float) A value from 0.0 to 1.0. When the model
     *   makes predictions for a text snippet, it will only produce results
     *   that have at least this confidence score. The default is 0.5.
     *
     *
     * AutoML Vision Classification
     *
     * `score_threshold`
     * : (float) A value from 0.0 to 1.0. When the model
     *   makes predictions for an image, it will only produce results that
     *   have at least this confidence score. The default is 0.5.
     *
     * AutoML Vision Object Detection
     *
     * `score_threshold`
     * : (float) When Model detects objects on the image,
     *   it will only produce bounding boxes which have at least this
     *   confidence score. Value in 0 to 1 range, default is 0.5.
     *
     * `max_bounding_box_count`
     * : (int64) The maximum number of bounding
     *   boxes returned per image. The default is 100, the
     *   number of bounding boxes returned might be limited by the server.
     * AutoML Video Intelligence Classification
     *
     * `score_threshold`
     * : (float) A value from 0.0 to 1.0. When the model
     *   makes predictions for a video, it will only produce results that
     *   have at least this confidence score. The default is 0.5.
     *
     * `segment_classification`
     * : (boolean) Set to true to request
     *   segment-level classification. AutoML Video Intelligence returns
     *   labels and their confidence scores for the entire segment of the
     *   video that user specified in the request configuration.
     *   The default is true.
     *
     * `shot_classification`
     * : (boolean) Set to true to request shot-level
     *   classification. AutoML Video Intelligence determines the boundaries
     *   for each camera shot in the entire segment of the video that user
     *   specified in the request configuration. AutoML Video Intelligence
     *   then returns labels and their confidence scores for each detected
     *   shot, along with the start and end time of the shot.
     *   The default is false.
     *
     *   WARNING: Model evaluation is not done for this classification type,
     *   the quality of it depends on training data, but there are no metrics
     *   provided to describe that quality.
     *
     * `1s_interval_classification`
     * : (boolean) Set to true to request
     *   classification for a video at one-second intervals. AutoML Video
     *   Intelligence returns labels and their confidence scores for each
     *   second of the entire segment of the video that user specified in the
     *   request configuration. The default is false.
     *
     *   WARNING: Model evaluation is not done for this classification
     *   type, the quality of it depends on training data, but there are no
     *   metrics provided to describe that quality.
     *
     * AutoML Video Intelligence Object Tracking
     *
     * `score_threshold`
     * : (float) When Model detects objects on video frames,
     *   it will only produce bounding boxes which have at least this
     *   confidence score. Value in 0 to 1 range, default is 0.5.
     *
     * `max_bounding_box_count`
     * : (int64) The maximum number of bounding
     *   boxes returned per image. The default is 100, the
     *   number of bounding boxes returned might be limited by the server.
     *
     * `min_bounding_box_size`
     * : (float) Only bounding boxes with shortest edge
     *   at least that long as a relative value of video frame size are
     *   returned. Value in 0 to 1 range. Default is 0.
     * 
* * map<string, string> params = 5; */ @java.lang.Override public boolean containsParams(java.lang.String key) { if (key == null) { throw new NullPointerException("map key"); } return internalGetParams().getMap().containsKey(key); } /** Use {@link #getParamsMap()} instead. */ @java.lang.Override @java.lang.Deprecated public java.util.Map getParams() { return getParamsMap(); } /** * * *
     * Additional domain-specific parameters for the predictions, any string must
     * be up to 25000 characters long.
     *
     * AutoML Natural Language Classification
     *
     * `score_threshold`
     * : (float) A value from 0.0 to 1.0. When the model
     *   makes predictions for a text snippet, it will only produce results
     *   that have at least this confidence score. The default is 0.5.
     *
     *
     * AutoML Vision Classification
     *
     * `score_threshold`
     * : (float) A value from 0.0 to 1.0. When the model
     *   makes predictions for an image, it will only produce results that
     *   have at least this confidence score. The default is 0.5.
     *
     * AutoML Vision Object Detection
     *
     * `score_threshold`
     * : (float) When Model detects objects on the image,
     *   it will only produce bounding boxes which have at least this
     *   confidence score. Value in 0 to 1 range, default is 0.5.
     *
     * `max_bounding_box_count`
     * : (int64) The maximum number of bounding
     *   boxes returned per image. The default is 100, the
     *   number of bounding boxes returned might be limited by the server.
     * AutoML Video Intelligence Classification
     *
     * `score_threshold`
     * : (float) A value from 0.0 to 1.0. When the model
     *   makes predictions for a video, it will only produce results that
     *   have at least this confidence score. The default is 0.5.
     *
     * `segment_classification`
     * : (boolean) Set to true to request
     *   segment-level classification. AutoML Video Intelligence returns
     *   labels and their confidence scores for the entire segment of the
 * video that the user specified in the request configuration. * The default is true. * * `shot_classification` * : (boolean) Set to true to request shot-level * classification. AutoML Video Intelligence determines the boundaries * for each camera shot in the entire segment of the video that the * user specified in the request configuration. AutoML Video * Intelligence then returns labels and their confidence scores for * each detected shot, along with the start and end time of the shot. * The default is false. * * WARNING: Model evaluation is not done for this classification type, * the quality of which depends on the training data, and no metrics * are provided to describe that quality. * * `1s_interval_classification` * : (boolean) Set to true to request * classification for a video at one-second intervals. AutoML Video * Intelligence returns labels and their confidence scores for each * second of the entire segment of the video that the user specified * in the request configuration. The default is false. * * WARNING: Model evaluation is not done for this classification * type, the quality of which depends on the training data, and no * metrics are provided to describe that quality. 
     *
     * AutoML Video Intelligence Object Tracking
     *
     * `score_threshold`
 * : (float) When the model detects objects on video frames, * it will only produce bounding boxes that have at least this * confidence score. Value in 0 to 1 range; default is 0.5. * * `max_bounding_box_count` * : (int64) The maximum number of bounding * boxes returned per image. The default is 100; the * number of bounding boxes returned might be limited by the server. * * `min_bounding_box_size` * : (float) Only bounding boxes whose shortest edge is at least this * long, measured as a fraction of the video frame size, are * returned. Value in 0 to 1 range. Default is 0. 
     * 
 * * map<string, string> params = 5; */ @java.lang.Override public java.util.Map<java.lang.String, java.lang.String> getParamsMap() { return internalGetParams().getMap(); } /** * * * 
     * Additional domain-specific parameters for the predictions; any string
     * value must be at most 25000 characters long.
     *
     * AutoML Natural Language Classification
     *
     * `score_threshold`
     * : (float) A value from 0.0 to 1.0. When the model
     *   makes predictions for a text snippet, it will only produce results
     *   that have at least this confidence score. The default is 0.5.
     *
     *
     * AutoML Vision Classification
     *
     * `score_threshold`
     * : (float) A value from 0.0 to 1.0. When the model
     *   makes predictions for an image, it will only produce results that
     *   have at least this confidence score. The default is 0.5.
     *
     * AutoML Vision Object Detection
     *
     * `score_threshold`
     * : (float) When the model detects objects in the image,
     *   it will only produce bounding boxes that have at least this
     *   confidence score. Value in 0 to 1 range; default is 0.5.
     *
     * `max_bounding_box_count`
     * : (int64) The maximum number of bounding
     *   boxes returned per image. The default is 100; the
     *   number of bounding boxes returned might be limited by the server.
     *
     * AutoML Video Intelligence Classification
     *
     * `score_threshold`
     * : (float) A value from 0.0 to 1.0. When the model
     *   makes predictions for a video, it will only produce results that
     *   have at least this confidence score. The default is 0.5.
     *
     * `segment_classification`
     * : (boolean) Set to true to request
     *   segment-level classification. AutoML Video Intelligence returns
     *   labels and their confidence scores for the entire segment of the
     *   video that the user specified in the request configuration.
     *   The default is true.
     *
     * `shot_classification`
     * : (boolean) Set to true to request shot-level
     *   classification. AutoML Video Intelligence determines the boundaries
     *   for each camera shot in the entire segment of the video that the
     *   user specified in the request configuration. AutoML Video
     *   Intelligence then returns labels and their confidence scores for
     *   each detected shot, along with the start and end time of the shot.
     *   The default is false.
     *
     *   WARNING: Model evaluation is not done for this classification type,
     *   the quality of which depends on the training data, and no metrics
     *   are provided to describe that quality.
     *
     * `1s_interval_classification`
     * : (boolean) Set to true to request
     *   classification for a video at one-second intervals. AutoML Video
     *   Intelligence returns labels and their confidence scores for each
     *   second of the entire segment of the video that the user specified
     *   in the request configuration. The default is false.
     *
     *   WARNING: Model evaluation is not done for this classification
     *   type, the quality of which depends on the training data, and no
     *   metrics are provided to describe that quality.
     *
     * AutoML Video Intelligence Object Tracking
     *
     * `score_threshold`
     * : (float) When the model detects objects on video frames,
     *   it will only produce bounding boxes that have at least this
     *   confidence score. Value in 0 to 1 range; default is 0.5.
     *
     * `max_bounding_box_count`
     * : (int64) The maximum number of bounding
     *   boxes returned per image. The default is 100; the
     *   number of bounding boxes returned might be limited by the server.
     *
     * `min_bounding_box_size`
     * : (float) Only bounding boxes whose shortest edge is at least this
     *   long, measured as a fraction of the video frame size, are
     *   returned. Value in 0 to 1 range. Default is 0.
     * 
     *
     * map<string, string> params = 5;
     */
    @java.lang.Override
    public /* nullable */ java.lang.String getParamsOrDefault(
        java.lang.String key,
        /* nullable */
        java.lang.String defaultValue) {
      if (key == null) {
        throw new NullPointerException("map key");
      }
      java.util.Map<java.lang.String, java.lang.String> map = internalGetParams().getMap();
      return map.containsKey(key) ? map.get(key) : defaultValue;
    }

    /**
     *
     *
     * 
 * Additional domain-specific parameters for the predictions; any string * value must be at most 25000 characters long. 
     *
     * AutoML Natural Language Classification
     *
     * `score_threshold`
     * : (float) A value from 0.0 to 1.0. When the model
     *   makes predictions for a text snippet, it will only produce results
     *   that have at least this confidence score. The default is 0.5.
     *
     *
     * AutoML Vision Classification
     *
     * `score_threshold`
     * : (float) A value from 0.0 to 1.0. When the model
     *   makes predictions for an image, it will only produce results that
     *   have at least this confidence score. The default is 0.5.
     *
     * AutoML Vision Object Detection
     *
     * `score_threshold`
 * : (float) When the model detects objects in the image, * it will only produce bounding boxes that have at least this * confidence score. Value in 0 to 1 range; default is 0.5. * * `max_bounding_box_count` * : (int64) The maximum number of bounding * boxes returned per image. The default is 100; the * number of bounding boxes returned might be limited by the server. * * AutoML Video Intelligence Classification 
     *
     * `score_threshold`
     * : (float) A value from 0.0 to 1.0. When the model
     *   makes predictions for a video, it will only produce results that
     *   have at least this confidence score. The default is 0.5.
     *
     * `segment_classification`
     * : (boolean) Set to true to request
     *   segment-level classification. AutoML Video Intelligence returns
     *   labels and their confidence scores for the entire segment of the
 * video that the user specified in the request configuration. * The default is true. * * `shot_classification` * : (boolean) Set to true to request shot-level * classification. AutoML Video Intelligence determines the boundaries * for each camera shot in the entire segment of the video that the * user specified in the request configuration. AutoML Video * Intelligence then returns labels and their confidence scores for * each detected shot, along with the start and end time of the shot. * The default is false. * * WARNING: Model evaluation is not done for this classification type, * the quality of which depends on the training data, and no metrics * are provided to describe that quality. * * `1s_interval_classification` * : (boolean) Set to true to request * classification for a video at one-second intervals. AutoML Video * Intelligence returns labels and their confidence scores for each * second of the entire segment of the video that the user specified * in the request configuration. The default is false. * * WARNING: Model evaluation is not done for this classification * type, the quality of which depends on the training data, and no * metrics are provided to describe that quality. 
     *
     * AutoML Video Intelligence Object Tracking
     *
     * `score_threshold`
 * : (float) When the model detects objects on video frames, * it will only produce bounding boxes that have at least this * confidence score. Value in 0 to 1 range; default is 0.5. * * `max_bounding_box_count` * : (int64) The maximum number of bounding * boxes returned per image. The default is 100; the * number of bounding boxes returned might be limited by the server. * * `min_bounding_box_size` * : (float) Only bounding boxes whose shortest edge is at least this * long, measured as a fraction of the video frame size, are * returned. Value in 0 to 1 range. Default is 0. 
     * 
 * * map<string, string> params = 5; */ @java.lang.Override public java.lang.String getParamsOrThrow(java.lang.String key) { if (key == null) { throw new NullPointerException("map key"); } java.util.Map<java.lang.String, java.lang.String> map = internalGetParams().getMap(); if (!map.containsKey(key)) { throw new java.lang.IllegalArgumentException(); } return map.get(key); } public Builder clearParams() { bitField0_ = (bitField0_ & ~0x00000008); internalGetMutableParams().getMutableMap().clear(); return this; } /** * * * 
     * Additional domain-specific parameters for the predictions; any string
     * value must be at most 25000 characters long.
     *
     * AutoML Natural Language Classification
     *
     * `score_threshold`
     * : (float) A value from 0.0 to 1.0. When the model
     *   makes predictions for a text snippet, it will only produce results
     *   that have at least this confidence score. The default is 0.5.
     *
     *
     * AutoML Vision Classification
     *
     * `score_threshold`
     * : (float) A value from 0.0 to 1.0. When the model
     *   makes predictions for an image, it will only produce results that
     *   have at least this confidence score. The default is 0.5.
     *
     * AutoML Vision Object Detection
     *
     * `score_threshold`
     * : (float) When the model detects objects in the image,
     *   it will only produce bounding boxes that have at least this
     *   confidence score. Value in 0 to 1 range; default is 0.5.
     *
     * `max_bounding_box_count`
     * : (int64) The maximum number of bounding
     *   boxes returned per image. The default is 100; the
     *   number of bounding boxes returned might be limited by the server.
     *
     * AutoML Video Intelligence Classification
     *
     * `score_threshold`
     * : (float) A value from 0.0 to 1.0. When the model
     *   makes predictions for a video, it will only produce results that
     *   have at least this confidence score. The default is 0.5.
     *
     * `segment_classification`
     * : (boolean) Set to true to request
     *   segment-level classification. AutoML Video Intelligence returns
     *   labels and their confidence scores for the entire segment of the
     *   video that the user specified in the request configuration.
     *   The default is true.
     *
     * `shot_classification`
     * : (boolean) Set to true to request shot-level
     *   classification. AutoML Video Intelligence determines the boundaries
     *   for each camera shot in the entire segment of the video that the
     *   user specified in the request configuration. AutoML Video
     *   Intelligence then returns labels and their confidence scores for
     *   each detected shot, along with the start and end time of the shot.
     *   The default is false.
     *
     *   WARNING: Model evaluation is not done for this classification type,
     *   the quality of which depends on the training data, and no metrics
     *   are provided to describe that quality.
     *
     * `1s_interval_classification`
     * : (boolean) Set to true to request
     *   classification for a video at one-second intervals. AutoML Video
     *   Intelligence returns labels and their confidence scores for each
     *   second of the entire segment of the video that the user specified
     *   in the request configuration. The default is false.
     *
     *   WARNING: Model evaluation is not done for this classification
     *   type, the quality of which depends on the training data, and no
     *   metrics are provided to describe that quality.
     *
     * AutoML Video Intelligence Object Tracking
     *
     * `score_threshold`
     * : (float) When the model detects objects on video frames,
     *   it will only produce bounding boxes that have at least this
     *   confidence score. Value in 0 to 1 range; default is 0.5.
     *
     * `max_bounding_box_count`
     * : (int64) The maximum number of bounding
     *   boxes returned per image. The default is 100; the
     *   number of bounding boxes returned might be limited by the server.
     *
     * `min_bounding_box_size`
     * : (float) Only bounding boxes whose shortest edge is at least this
     *   long, measured as a fraction of the video frame size, are
     *   returned. Value in 0 to 1 range. Default is 0.
     * 
     *
     * map<string, string> params = 5;
     */
    public Builder removeParams(java.lang.String key) {
      if (key == null) {
        throw new NullPointerException("map key");
      }
      internalGetMutableParams().getMutableMap().remove(key);
      return this;
    }

    /** Use alternate mutation accessors instead. */
    @java.lang.Deprecated
    public java.util.Map<java.lang.String, java.lang.String> getMutableParams() {
      bitField0_ |= 0x00000008;
      return internalGetMutableParams().getMutableMap();
    }

    /**
     *
     *
     * 
 * Additional domain-specific parameters for the predictions; any string * value must be at most 25000 characters long. 
     *
     * AutoML Natural Language Classification
     *
     * `score_threshold`
     * : (float) A value from 0.0 to 1.0. When the model
     *   makes predictions for a text snippet, it will only produce results
     *   that have at least this confidence score. The default is 0.5.
     *
     *
     * AutoML Vision Classification
     *
     * `score_threshold`
     * : (float) A value from 0.0 to 1.0. When the model
     *   makes predictions for an image, it will only produce results that
     *   have at least this confidence score. The default is 0.5.
     *
     * AutoML Vision Object Detection
     *
     * `score_threshold`
 * : (float) When the model detects objects in the image, * it will only produce bounding boxes that have at least this * confidence score. Value in 0 to 1 range; default is 0.5. * * `max_bounding_box_count` * : (int64) The maximum number of bounding * boxes returned per image. The default is 100; the * number of bounding boxes returned might be limited by the server. * * AutoML Video Intelligence Classification 
     *
     * `score_threshold`
     * : (float) A value from 0.0 to 1.0. When the model
     *   makes predictions for a video, it will only produce results that
     *   have at least this confidence score. The default is 0.5.
     *
     * `segment_classification`
     * : (boolean) Set to true to request
     *   segment-level classification. AutoML Video Intelligence returns
     *   labels and their confidence scores for the entire segment of the
 * video that the user specified in the request configuration. * The default is true. * * `shot_classification` * : (boolean) Set to true to request shot-level * classification. AutoML Video Intelligence determines the boundaries * for each camera shot in the entire segment of the video that the * user specified in the request configuration. AutoML Video * Intelligence then returns labels and their confidence scores for * each detected shot, along with the start and end time of the shot. * The default is false. * * WARNING: Model evaluation is not done for this classification type, * the quality of which depends on the training data, and no metrics * are provided to describe that quality. * * `1s_interval_classification` * : (boolean) Set to true to request * classification for a video at one-second intervals. AutoML Video * Intelligence returns labels and their confidence scores for each * second of the entire segment of the video that the user specified * in the request configuration. The default is false. * * WARNING: Model evaluation is not done for this classification * type, the quality of which depends on the training data, and no * metrics are provided to describe that quality. 
     *
     * AutoML Video Intelligence Object Tracking
     *
     * `score_threshold`
 * : (float) When the model detects objects on video frames, * it will only produce bounding boxes that have at least this * confidence score. Value in 0 to 1 range; default is 0.5. * * `max_bounding_box_count` * : (int64) The maximum number of bounding * boxes returned per image. The default is 100; the * number of bounding boxes returned might be limited by the server. * * `min_bounding_box_size` * : (float) Only bounding boxes whose shortest edge is at least this * long, measured as a fraction of the video frame size, are * returned. Value in 0 to 1 range. Default is 0. 
     * 
 * * map<string, string> params = 5; */ public Builder putParams(java.lang.String key, java.lang.String value) { if (key == null) { throw new NullPointerException("map key"); } if (value == null) { throw new NullPointerException("map value"); } internalGetMutableParams().getMutableMap().put(key, value); bitField0_ |= 0x00000008; return this; } /** * * * 
     * Additional domain-specific parameters for the predictions; any string
     * value must be at most 25000 characters long.
     *
     * AutoML Natural Language Classification
     *
     * `score_threshold`
     * : (float) A value from 0.0 to 1.0. When the model
     *   makes predictions for a text snippet, it will only produce results
     *   that have at least this confidence score. The default is 0.5.
     *
     *
     * AutoML Vision Classification
     *
     * `score_threshold`
     * : (float) A value from 0.0 to 1.0. When the model
     *   makes predictions for an image, it will only produce results that
     *   have at least this confidence score. The default is 0.5.
     *
     * AutoML Vision Object Detection
     *
     * `score_threshold`
     * : (float) When the model detects objects in the image,
     *   it will only produce bounding boxes that have at least this
     *   confidence score. Value in 0 to 1 range; default is 0.5.
     *
     * `max_bounding_box_count`
     * : (int64) The maximum number of bounding
     *   boxes returned per image. The default is 100; the
     *   number of bounding boxes returned might be limited by the server.
     *
     * AutoML Video Intelligence Classification
     *
     * `score_threshold`
     * : (float) A value from 0.0 to 1.0. When the model
     *   makes predictions for a video, it will only produce results that
     *   have at least this confidence score. The default is 0.5.
     *
     * `segment_classification`
     * : (boolean) Set to true to request
     *   segment-level classification. AutoML Video Intelligence returns
     *   labels and their confidence scores for the entire segment of the
     *   video that the user specified in the request configuration.
     *   The default is true.
     *
     * `shot_classification`
     * : (boolean) Set to true to request shot-level
     *   classification. AutoML Video Intelligence determines the boundaries
     *   for each camera shot in the entire segment of the video that the
     *   user specified in the request configuration. AutoML Video
     *   Intelligence then returns labels and their confidence scores for
     *   each detected shot, along with the start and end time of the shot.
     *   The default is false.
     *
     *   WARNING: Model evaluation is not done for this classification type,
     *   the quality of which depends on the training data, and no metrics
     *   are provided to describe that quality.
     *
     * `1s_interval_classification`
     * : (boolean) Set to true to request
     *   classification for a video at one-second intervals. AutoML Video
     *   Intelligence returns labels and their confidence scores for each
     *   second of the entire segment of the video that the user specified
     *   in the request configuration. The default is false.
     *
     *   WARNING: Model evaluation is not done for this classification
     *   type, the quality of which depends on the training data, and no
     *   metrics are provided to describe that quality.
     *
     * AutoML Video Intelligence Object Tracking
     *
     * `score_threshold`
     * : (float) When the model detects objects on video frames,
     *   it will only produce bounding boxes that have at least this
     *   confidence score. Value in 0 to 1 range; default is 0.5.
     *
     * `max_bounding_box_count`
     * : (int64) The maximum number of bounding
     *   boxes returned per image. The default is 100; the
     *   number of bounding boxes returned might be limited by the server.
     *
     * `min_bounding_box_size`
     * : (float) Only bounding boxes whose shortest edge is at least this
     *   long, measured as a fraction of the video frame size, are
     *   returned. Value in 0 to 1 range. Default is 0.
     * 
     *
     * map<string, string> params = 5;
     */
    public Builder putAllParams(java.util.Map<java.lang.String, java.lang.String> values) {
      internalGetMutableParams().getMutableMap().putAll(values);
      bitField0_ |= 0x00000008;
      return this;
    }

    @java.lang.Override
    public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) {
      return super.setUnknownFields(unknownFields);
    }

    @java.lang.Override
    public final Builder mergeUnknownFields(
        final com.google.protobuf.UnknownFieldSet unknownFields) {
      return super.mergeUnknownFields(unknownFields);
    }

    // @@protoc_insertion_point(builder_scope:google.cloud.automl.v1.BatchPredictRequest)
  }

  // @@protoc_insertion_point(class_scope:google.cloud.automl.v1.BatchPredictRequest)
  private static final com.google.cloud.automl.v1.BatchPredictRequest DEFAULT_INSTANCE;

  static {
    DEFAULT_INSTANCE = new com.google.cloud.automl.v1.BatchPredictRequest();
  }

  public static com.google.cloud.automl.v1.BatchPredictRequest getDefaultInstance() {
    return DEFAULT_INSTANCE;
  }

  private static final com.google.protobuf.Parser<BatchPredictRequest> PARSER =
      new com.google.protobuf.AbstractParser<BatchPredictRequest>() {
        @java.lang.Override
        public BatchPredictRequest parsePartialFrom(
            com.google.protobuf.CodedInputStream input,
            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
            throws com.google.protobuf.InvalidProtocolBufferException {
          Builder builder = newBuilder();
          try {
            builder.mergeFrom(input, extensionRegistry);
          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
            throw e.setUnfinishedMessage(builder.buildPartial());
          } catch (com.google.protobuf.UninitializedMessageException e) {
            throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
          } catch (java.io.IOException e) {
            throw new com.google.protobuf.InvalidProtocolBufferException(e)
                .setUnfinishedMessage(builder.buildPartial());
          }
          return builder.buildPartial();
        }
      };

  public static com.google.protobuf.Parser<BatchPredictRequest> parser() {
    return PARSER;
  }

  @java.lang.Override
  public com.google.protobuf.Parser<BatchPredictRequest> getParserForType() {
    return PARSER;
  }

  @java.lang.Override
  public com.google.cloud.automl.v1.BatchPredictRequest getDefaultInstanceForType() {
    return DEFAULT_INSTANCE;
  }
}
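The params accessors in the generated class above are normally used through the message builder. The following is a minimal usage sketch, not part of the generated file: it assumes the standard generated BatchPredictRequest.newBuilder()/build() entry points and a setName(String) setter for the model resource name, none of which appear in this excerpt; the model name and threshold value are placeholders. Only putParams, containsParams, and getParamsOrDefault come directly from the accessors shown above.

import com.google.cloud.automl.v1.BatchPredictRequest;

public class BatchPredictParamsSketch {
  public static void main(String[] args) {
    // Hypothetical model resource name, for illustration only.
    String modelName = "projects/my-project/locations/us-central1/models/ICN123";

    BatchPredictRequest request =
        BatchPredictRequest.newBuilder() // assumed standard generated entry point
            .setName(modelName) // assumed setter for the `name` field
            // Domain-specific parameters travel as strings in the params map;
            // here, keep only predictions with confidence >= 0.8.
            .putParams("score_threshold", "0.8")
            .build();

    // Read the map back through the generated map-field accessors.
    boolean hasThreshold = request.containsParams("score_threshold");
    String threshold = request.getParamsOrDefault("score_threshold", "0.5");
    System.out.println("score_threshold set: " + hasThreshold + ", value: " + threshold);
  }
}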



