/*
 * Copyright 2024 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: google/cloud/automl/v1beta1/io.proto

// Protobuf Java Version: 3.25.5
package com.google.cloud.automl.v1beta1;

/**
 *
 *
 * <pre>
 * Input configuration for ImportData Action.
 *
 * The format of the input depends on the dataset_metadata of the Dataset
 * into which the import is happening. As input source the
 * [gcs_source][google.cloud.automl.v1beta1.InputConfig.gcs_source]
 * is expected, unless specified otherwise. Additionally any input .CSV file
 * by itself must be 100MB or smaller, unless specified otherwise.
 * If an "example" file (that is, image, video etc.) with identical content
 * (even if it had different GCS_FILE_PATH) is mentioned multiple times, then
 * its label, bounding boxes etc. are appended. The same file should always
 * be provided with the same ML_USE and GCS_FILE_PATH; if it is not, then
 * these values are nondeterministically selected from the given ones.
 *
 * The formats are represented in EBNF with commas being literal and with
 * non-terminal symbols defined near the end of this comment. The formats are:
 *
 *  *  For Image Classification:
 *         CSV file(s) with each line in format:
 *           ML_USE,GCS_FILE_PATH,LABEL,LABEL,...
 *           GCS_FILE_PATH leads to an image of up to 30MB in size. Supported
 *           extensions: .JPEG, .GIF, .PNG, .WEBP, .BMP, .TIFF, .ICO
 *           For MULTICLASS classification type, at most one LABEL is allowed
 *           per image. If an image has not yet been labeled, then it should be
 *           mentioned just once with no LABEL.
 *         Some sample rows:
 *           TRAIN,gs://folder/image1.jpg,daisy
 *           TEST,gs://folder/image2.jpg,dandelion,tulip,rose
 *           UNASSIGNED,gs://folder/image3.jpg,daisy
 *           UNASSIGNED,gs://folder/image4.jpg
 *
 *  *  For Image Object Detection:
 *         CSV file(s) with each line in format:
 *           ML_USE,GCS_FILE_PATH,(LABEL,BOUNDING_BOX | ,,,,,,,)
 *           GCS_FILE_PATH leads to an image of up to 30MB in size. Supported
 *           extensions: .JPEG, .GIF, .PNG.
 *           Each image is assumed to be exhaustively labeled. The minimum
 *           allowed BOUNDING_BOX edge length is 0.01, and no more than 500
 *           BOUNDING_BOX-es per image are allowed (one BOUNDING_BOX is defined
 *           per line). If an image has not yet been labeled, then it should be
 *           mentioned just once with no LABEL and the ",,,,,,," in place of the
 *           BOUNDING_BOX. Images which are known not to contain any
 *           bounding boxes should be labeled explicitly as
 *           "NEGATIVE_IMAGE", followed by ",,,,,,," in place of the
 *           BOUNDING_BOX.
 *         Sample rows:
 *           TRAIN,gs://folder/image1.png,car,0.1,0.1,,,0.3,0.3,,
 *           TRAIN,gs://folder/image1.png,bike,.7,.6,,,.8,.9,,
 *           UNASSIGNED,gs://folder/im2.png,car,0.1,0.1,0.2,0.1,0.2,0.3,0.1,0.3
 *           TEST,gs://folder/im3.png,,,,,,,,,
 *           TRAIN,gs://folder/im4.png,NEGATIVE_IMAGE,,,,,,,,,
 *
 *  *  For Video Classification:
 *         CSV file(s) with each line in format:
 *           ML_USE,GCS_FILE_PATH
 *           where the ML_USE VALIDATE value should not be used. The GCS_FILE_PATH
 *           should lead to another .csv file which describes examples that have
 *           given ML_USE, using the following row format:
 *           GCS_FILE_PATH,(LABEL,TIME_SEGMENT_START,TIME_SEGMENT_END | ,,)
 *           Here GCS_FILE_PATH leads to a video of up to 50GB in size and up
 *           to 3h duration. Supported extensions: .MOV, .MPEG4, .MP4, .AVI.
 *           TIME_SEGMENT_START and TIME_SEGMENT_END must be within the
 *           length of the video, and end has to be after the start. Any segment
 *           of a video which has one or more labels on it is considered a
 *           hard negative for all other labels. Any segment with no labels on
 *           it is considered to be unknown. If a whole video is unknown, then
 *           it should be mentioned just once with ",," in place of LABEL,
 *           TIME_SEGMENT_START,TIME_SEGMENT_END.
 *         Sample top level CSV file:
 *           TRAIN,gs://folder/train_videos.csv
 *           TEST,gs://folder/test_videos.csv
 *           UNASSIGNED,gs://folder/other_videos.csv
 *         Sample rows of a CSV file for a particular ML_USE:
 *           gs://folder/video1.avi,car,120,180.000021
 *           gs://folder/video1.avi,bike,150,180.000021
 *           gs://folder/vid2.avi,car,0,60.5
 *           gs://folder/vid3.avi,,,
 *
 *  *  For Video Object Tracking:
 *         CSV file(s) with each line in format:
 *           ML_USE,GCS_FILE_PATH
 *           where the ML_USE VALIDATE value should not be used. The GCS_FILE_PATH
 *           should lead to another .csv file which describes examples that have
 *           given ML_USE, using one of the following row formats:
 *           GCS_FILE_PATH,LABEL,[INSTANCE_ID],TIMESTAMP,BOUNDING_BOX
 *           or
 *           GCS_FILE_PATH,,,,,,,,,,
 *           Here GCS_FILE_PATH leads to a video of up to 50GB in size and up
 *           to 3h duration. Supported extensions: .MOV, .MPEG4, .MP4, .AVI.
 *           Providing INSTANCE_IDs can help to obtain a better model. When
 *           a specific labeled entity leaves the video frame and shows up
 *           again later, it is not required, albeit preferable, that the
 *           same INSTANCE_ID is given to it.
 *           TIMESTAMP must be within the length of the video; the
 *           BOUNDING_BOX is assumed to be drawn on the video frame closest
 *           to the TIMESTAMP. Any frame mentioned by a TIMESTAMP is expected
 *           to be exhaustively labeled and no more than 500 BOUNDING_BOX-es per
 *           frame are allowed. If a whole video is unknown, then it should be
 *           mentioned just once with ",,,,,,,,,," in place of LABEL,
 *           [INSTANCE_ID],TIMESTAMP,BOUNDING_BOX.
 *         Sample top level CSV file:
 *           TRAIN,gs://folder/train_videos.csv
 *           TEST,gs://folder/test_videos.csv
 *           UNASSIGNED,gs://folder/other_videos.csv
 *         Seven sample rows of a CSV file for a particular ML_USE:
 *           gs://folder/video1.avi,car,1,12.10,0.8,0.8,0.9,0.8,0.9,0.9,0.8,0.9
 *           gs://folder/video1.avi,car,1,12.90,0.4,0.8,0.5,0.8,0.5,0.9,0.4,0.9
 *           gs://folder/video1.avi,car,2,12.10,.4,.2,.5,.2,.5,.3,.4,.3
 *           gs://folder/video1.avi,car,2,12.90,.8,.2,,,.9,.3,,
 *           gs://folder/video1.avi,bike,,12.50,.45,.45,,,.55,.55,,
 *           gs://folder/video2.avi,car,1,0,.1,.9,,,.9,.1,,
 *           gs://folder/video2.avi,,,,,,,,,,,
 *  *  For Text Extraction:
 *         CSV file(s) with each line in format:
 *           ML_USE,GCS_FILE_PATH
 *           GCS_FILE_PATH leads to a .JSONL (that is, JSON Lines) file which
 *           either imports text in-line or as documents. Any given
 *           .JSONL file must be 100MB or smaller.
 *           The in-line .JSONL file contains, per line, a proto that wraps a
 *           TextSnippet proto (in JSON representation) followed by one or more
 *           AnnotationPayload protos (called annotations), which have
 *           display_name and text_extraction detail populated. The given text
 *           is expected to be annotated exhaustively, for example, if you look
 *           for animals and text contains "dolphin" that is not labeled, then
 *           "dolphin" is assumed to not be an animal. Any given text snippet
 *           content must be 10KB or smaller, and also be UTF-8 NFC encoded
 *           (ASCII already is).
 *           The document .JSONL file contains, per line, a proto that wraps a
 *           Document proto. The Document proto must have either document_text
 *           or input_config set. In document_text case, the Document proto may
 *           also contain the spatial information of the document, including
 *           layout, document dimension and page number. In input_config case,
 *           only PDF documents are supported now, and each document may be up
 *           to 2MB large. Currently, annotations on documents cannot be
 *           specified at import.
 *         Three sample CSV rows:
 *           TRAIN,gs://folder/file1.jsonl
 *           VALIDATE,gs://folder/file2.jsonl
 *           TEST,gs://folder/file3.jsonl
 *         Sample in-line JSON Lines file for entity extraction (presented here
 *         with artificial line breaks, but the only actual line break is
 *         denoted by \n).:
 *           {
 *             "document": {
 *               "document_text": {"content": "dog cat"}
 *               "layout": [
 *                 {
 *                   "text_segment": {
 *                     "start_offset": 0,
 *                     "end_offset": 3,
 *                   },
 *                   "page_number": 1,
 *                   "bounding_poly": {
 *                     "normalized_vertices": [
 *                       {"x": 0.1, "y": 0.1},
 *                       {"x": 0.1, "y": 0.3},
 *                       {"x": 0.3, "y": 0.3},
 *                       {"x": 0.3, "y": 0.1},
 *                     ],
 *                   },
 *                   "text_segment_type": TOKEN,
 *                 },
 *                 {
 *                   "text_segment": {
 *                     "start_offset": 4,
 *                     "end_offset": 7,
 *                   },
 *                   "page_number": 1,
 *                   "bounding_poly": {
 *                     "normalized_vertices": [
 *                       {"x": 0.4, "y": 0.1},
 *                       {"x": 0.4, "y": 0.3},
 *                       {"x": 0.8, "y": 0.3},
 *                       {"x": 0.8, "y": 0.1},
 *                     ],
 *                   },
 *                   "text_segment_type": TOKEN,
 *                 }
 *
 *               ],
 *               "document_dimensions": {
 *                 "width": 8.27,
 *                 "height": 11.69,
 *                 "unit": INCH,
 *               }
 *               "page_count": 1,
 *             },
 *             "annotations": [
 *               {
 *                 "display_name": "animal",
 *                 "text_extraction": {"text_segment": {"start_offset": 0,
 *                 "end_offset": 3}}
 *               },
 *               {
 *                 "display_name": "animal",
 *                 "text_extraction": {"text_segment": {"start_offset": 4,
 *                 "end_offset": 7}}
 *               }
 *             ],
 *           }\n
 *           {
 *              "text_snippet": {
 *                "content": "This dog is good."
 *              },
 *              "annotations": [
 *                {
 *                  "display_name": "animal",
 *                  "text_extraction": {
 *                    "text_segment": {"start_offset": 5, "end_offset": 8}
 *                  }
 *                }
 *              ]
 *           }
 *         Sample document JSON Lines file (presented here with artificial line
 *         breaks, but the only actual line break is denoted by \n).:
 *           {
 *             "document": {
 *               "input_config": {
 *                 "gcs_source": { "input_uris": [ "gs://folder/document1.pdf" ]
 *                 }
 *               }
 *             }
 *           }\n
 *           {
 *             "document": {
 *               "input_config": {
 *                 "gcs_source": { "input_uris": [ "gs://folder/document2.pdf" ]
 *                 }
 *               }
 *             }
 *           }
 *
 *  *  For Text Classification:
 *         CSV file(s) with each line in format:
 *           ML_USE,(TEXT_SNIPPET | GCS_FILE_PATH),LABEL,LABEL,...
 *           TEXT_SNIPPET and GCS_FILE_PATH are distinguished by a pattern. If
 *           the column content is a valid GCS file path, i.e. prefixed by
 *           "gs://", it will be treated as a GCS_FILE_PATH, else if the content
 *           is enclosed within double quotes (""), it is
 *           treated as a TEXT_SNIPPET. In the GCS_FILE_PATH case, the path
 *           must lead to a .txt file with UTF-8 encoding, for example,
 *           "gs://folder/content.txt", and the content in it is extracted
 *           as a text snippet. In the TEXT_SNIPPET case, the column content
 *           excluding quotes is treated as the text snippet to be imported. In
 *           both cases, the text snippet/file size must be within 128kB.
 *           Maximum 100 unique labels are allowed per CSV row.
 *         Sample rows:
 *           TRAIN,"They have bad food and very rude",RudeService,BadFood
 *           TRAIN,gs://folder/content.txt,SlowService
 *           TEST,"Typically always bad service there.",RudeService
 *           VALIDATE,"Stomach ache to go.",BadFood
 *
 *  *  For Text Sentiment:
 *         CSV file(s) with each line in format:
 *           ML_USE,(TEXT_SNIPPET | GCS_FILE_PATH),SENTIMENT
 *           TEXT_SNIPPET and GCS_FILE_PATH are distinguished by a pattern. If
 *           the column content is a valid GCS file path, that is, prefixed by
 *           "gs://", it is treated as a GCS_FILE_PATH, otherwise it is treated
 *           as a TEXT_SNIPPET. In the GCS_FILE_PATH case, the path
 *           must lead to a .txt file with UTF-8 encoding, for example,
 *           "gs://folder/content.txt", and the content in it is extracted
 *           as a text snippet. In the TEXT_SNIPPET case, the column content
 *           itself is treated as the text snippet to be imported. In both
 *           cases, the text snippet must be up to 500 characters long.
 *         Sample rows:
 *           TRAIN,"@freewrytin this is way too good for your product",2
 *           TRAIN,"I need this product so bad",3
 *           TEST,"Thank you for this product.",4
 *           VALIDATE,gs://folder/content.txt,2
 *
 *   *  For Tables:
 *         Either
 *         [gcs_source][google.cloud.automl.v1beta1.InputConfig.gcs_source] or
 *
 * [bigquery_source][google.cloud.automl.v1beta1.InputConfig.bigquery_source]
 *         can be used. All input is concatenated into a single
 *
 * [primary_table][google.cloud.automl.v1beta1.TablesDatasetMetadata.primary_table_name].
 *         For gcs_source:
 *           CSV file(s), where the first row of the first file is the header,
 *           containing unique column names. If the first row of a subsequent
 *           file is the same as the header, then it is also treated as a
 *           header. All other rows contain values for the corresponding
 *           columns.
 *           Each .CSV file by itself must be 10GB or smaller, and their total
 *           size must be 100GB or smaller.
 *           First three sample rows of a CSV file:
 *           "Id","First Name","Last Name","Dob","Addresses"
 *
 * "1","John","Doe","1968-01-22","[{"status":"current","address":"123_First_Avenue","city":"Seattle","state":"WA","zip":"11111","numberOfYears":"1"},{"status":"previous","address":"456_Main_Street","city":"Portland","state":"OR","zip":"22222","numberOfYears":"5"}]"
 *
 * "2","Jane","Doe","1980-10-16","[{"status":"current","address":"789_Any_Avenue","city":"Albany","state":"NY","zip":"33333","numberOfYears":"2"},{"status":"previous","address":"321_Main_Street","city":"Hoboken","state":"NJ","zip":"44444","numberOfYears":"3"}]"
 *         For bigquery_source:
 *           A URI of a BigQuery table. The user data size of the BigQuery
 *           table must be 100GB or smaller.
 *         An imported table must have between 2 and 1,000 columns, inclusive,
 *         and between 1000 and 100,000,000 rows, inclusive. At most 5
 *         import data operations can run in parallel.
 *  Definitions:
 *  ML_USE = "TRAIN" | "VALIDATE" | "TEST" | "UNASSIGNED"
 *           Describes how the given example (file) should be used for model
 *           training. "UNASSIGNED" can be used when the user has no preference.
 *  GCS_FILE_PATH = A path to file on GCS, e.g. "gs://folder/image1.png".
 *  LABEL = A display name of an object on an image, video etc., e.g. "dog".
 *          Must be up to 32 characters long and can consist only of ASCII
 *          Latin letters A-Z and a-z, underscores (_), and ASCII digits 0-9.
 *          For each label an AnnotationSpec is created whose display_name
 *          becomes the label; AnnotationSpecs are given back in predictions.
 *  INSTANCE_ID = A positive integer that identifies a specific instance of a
 *                labeled entity on an example. Used e.g. to track two cars on
 *                a video while being able to tell apart which one is which.
 *  BOUNDING_BOX = VERTEX,VERTEX,VERTEX,VERTEX | VERTEX,,,VERTEX,,
 *                 A rectangle parallel to the frame of the example (image,
 *                 video). If 4 vertices are given they are connected by edges
 *                 in the order provided, if 2 are given they are recognized
 *                 as diagonally opposite vertices of the rectangle.
 *  VERTEX = COORDINATE,COORDINATE
 *           First coordinate is horizontal (x), the second is vertical (y).
 *  COORDINATE = A float in the 0 to 1 range, relative to total length of
 *               image or video in given dimension. For fractions the
 *               leading non-decimal 0 can be omitted (i.e. 0.3 = .3).
 *               Point 0,0 is in top left.
 *  TIME_SEGMENT_START = TIME_OFFSET
 *                       Expresses a beginning, inclusive, of a time segment
 *                       within an example that has a time dimension
 *                       (e.g. video).
 *  TIME_SEGMENT_END = TIME_OFFSET
 *                     Expresses an end, exclusive, of a time segment within
 *                     an example that has a time dimension (e.g. video).
 *  TIME_OFFSET = A number of seconds as measured from the start of an
 *                example (e.g. video). Fractions are allowed, up to a
 *                microsecond precision. "inf" is allowed, and it means the end
 *                of the example.
 *  TEXT_SNIPPET = The content of a text snippet, UTF-8 encoded, enclosed within
 *                 double quotes ("").
 *  SENTIMENT = An integer between 0 and
 *              Dataset.text_sentiment_dataset_metadata.sentiment_max
 *              (inclusive). Describes the ordinal of the sentiment - higher
 *              value means a more positive sentiment. All the values are
 *              completely relative, i.e. neither 0 needs to mean a negative or
 *              neutral sentiment nor sentiment_max needs to mean a positive one
 *              - it is just required that 0 is the least positive sentiment
 *              in the data, and sentiment_max is the most positive one.
 *              The SENTIMENT shouldn't be confused with "score" or "magnitude"
 *              from the previous Natural Language Sentiment Analysis API.
 *              All SENTIMENT values between 0 and sentiment_max must be
 *              represented in the imported data. On prediction the same 0 to
 *              sentiment_max range will be used. The difference between
 *              neighboring sentiment values need not be uniform, e.g. 1 and
 *              2 may be similar whereas the difference between 2 and 3 may be
 *              huge.
 *
 *  Errors:
 *  If any of the provided CSV files can't be parsed or if more than a certain
 *  percent of CSV rows cannot be processed, then the operation fails and
 *  nothing is imported. Regardless of overall success or failure, the per-row
 *  failures, up to a certain count cap, are listed in
 *  Operation.metadata.partial_failures.
 * </pre>
 *
 * Protobuf type {@code google.cloud.automl.v1beta1.InputConfig}
 */
public final class InputConfig extends com.google.protobuf.GeneratedMessageV3
    implements
    // @@protoc_insertion_point(message_implements:google.cloud.automl.v1beta1.InputConfig)
    InputConfigOrBuilder {
  private static final long serialVersionUID = 0L;

  // Use InputConfig.newBuilder() to construct.
  private InputConfig(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
    super(builder);
  }

  private InputConfig() {}

  @java.lang.Override
  @SuppressWarnings({"unused"})
  protected java.lang.Object newInstance(UnusedPrivateParameter unused) {
    return new InputConfig();
  }

  public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
    return com.google.cloud.automl.v1beta1.Io
        .internal_static_google_cloud_automl_v1beta1_InputConfig_descriptor;
  }

  @SuppressWarnings({"rawtypes"})
  @java.lang.Override
  protected com.google.protobuf.MapFieldReflectionAccessor internalGetMapFieldReflection(
      int number) {
    switch (number) {
      case 2:
        return internalGetParams();
      default:
        throw new RuntimeException("Invalid map field number: " + number);
    }
  }

  @java.lang.Override
  protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
      internalGetFieldAccessorTable() {
    return com.google.cloud.automl.v1beta1.Io
        .internal_static_google_cloud_automl_v1beta1_InputConfig_fieldAccessorTable
        .ensureFieldAccessorsInitialized(
            com.google.cloud.automl.v1beta1.InputConfig.class,
            com.google.cloud.automl.v1beta1.InputConfig.Builder.class);
  }

  private int sourceCase_ = 0;

  @SuppressWarnings("serial")
  private java.lang.Object source_;

  public enum SourceCase
      implements
          com.google.protobuf.Internal.EnumLite,
          com.google.protobuf.AbstractMessage.InternalOneOfEnum {
    GCS_SOURCE(1),
    BIGQUERY_SOURCE(3),
    SOURCE_NOT_SET(0);

    private final int value;

    private SourceCase(int value) {
      this.value = value;
    }

    /**
     * @param value The number of the enum to look for.
     * @return The enum associated with the given number.
     * @deprecated Use {@link #forNumber(int)} instead.
     */
    @java.lang.Deprecated
    public static SourceCase valueOf(int value) {
      return forNumber(value);
    }

    public static SourceCase forNumber(int value) {
      switch (value) {
        case 1:
          return GCS_SOURCE;
        case 3:
          return BIGQUERY_SOURCE;
        case 0:
          return SOURCE_NOT_SET;
        default:
          return null;
      }
    }

    public int getNumber() {
      return this.value;
    }
  };

  public SourceCase getSourceCase() {
    return SourceCase.forNumber(sourceCase_);
  }

  public static final int GCS_SOURCE_FIELD_NUMBER = 1;
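
  // Illustrative usage sketch, not part of the generated file: building an
  // InputConfig for ImportData that reads one of the CSV formats documented
  // above from Cloud Storage ("gs://folder/train.csv" is a placeholder URI).
  //
  //   InputConfig inputConfig =
  //       InputConfig.newBuilder()
  //           .setGcsSource(
  //               GcsSource.newBuilder().addInputUris("gs://folder/train.csv").build())
  //           .build();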
  /**
   *
   *
   * <pre>
   * The Google Cloud Storage location for the input content.
   * In ImportData, the gcs_source points to a csv with structure described in
   * the comment.
   * </pre>
   *
   * <code>.google.cloud.automl.v1beta1.GcsSource gcs_source = 1;</code>
   *
   * @return Whether the gcsSource field is set.
   */
  @java.lang.Override
  public boolean hasGcsSource() {
    return sourceCase_ == 1;
  }

  /**
   *
   *
   * <pre>
   * The Google Cloud Storage location for the input content.
   * In ImportData, the gcs_source points to a csv with structure described in
   * the comment.
   * </pre>
   *
   * <code>.google.cloud.automl.v1beta1.GcsSource gcs_source = 1;</code>
   *
   * @return The gcsSource.
   */
  @java.lang.Override
  public com.google.cloud.automl.v1beta1.GcsSource getGcsSource() {
    if (sourceCase_ == 1) {
      return (com.google.cloud.automl.v1beta1.GcsSource) source_;
    }
    return com.google.cloud.automl.v1beta1.GcsSource.getDefaultInstance();
  }

  /**
   *
   *
   * <pre>
   * The Google Cloud Storage location for the input content.
   * In ImportData, the gcs_source points to a csv with structure described in
   * the comment.
   * </pre>
   *
   * <code>.google.cloud.automl.v1beta1.GcsSource gcs_source = 1;</code>
   */
  @java.lang.Override
  public com.google.cloud.automl.v1beta1.GcsSourceOrBuilder getGcsSourceOrBuilder() {
    if (sourceCase_ == 1) {
      return (com.google.cloud.automl.v1beta1.GcsSource) source_;
    }
    return com.google.cloud.automl.v1beta1.GcsSource.getDefaultInstance();
  }

  public static final int BIGQUERY_SOURCE_FIELD_NUMBER = 3;
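
  // gcs_source and bigquery_source are members of the "source" oneof, so setting
  // one clears the other. A sketch of consuming code (handleGcs/handleBigQuery
  // are hypothetical helpers):
  //
  //   switch (config.getSourceCase()) {
  //     case GCS_SOURCE:
  //       handleGcs(config.getGcsSource());
  //       break;
  //     case BIGQUERY_SOURCE:
  //       handleBigQuery(config.getBigquerySource());
  //       break;
  //     case SOURCE_NOT_SET:
  //       break;
  //   }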
  /**
   *
   *
   * <pre>
   * The BigQuery location for the input content.
   * </pre>
   *
   * <code>.google.cloud.automl.v1beta1.BigQuerySource bigquery_source = 3;</code>
   *
   * @return Whether the bigquerySource field is set.
   */
  @java.lang.Override
  public boolean hasBigquerySource() {
    return sourceCase_ == 3;
  }

  /**
   *
   *
   * <pre>
   * The BigQuery location for the input content.
   * </pre>
   *
   * <code>.google.cloud.automl.v1beta1.BigQuerySource bigquery_source = 3;</code>
   *
   * @return The bigquerySource.
   */
  @java.lang.Override
  public com.google.cloud.automl.v1beta1.BigQuerySource getBigquerySource() {
    if (sourceCase_ == 3) {
      return (com.google.cloud.automl.v1beta1.BigQuerySource) source_;
    }
    return com.google.cloud.automl.v1beta1.BigQuerySource.getDefaultInstance();
  }

  /**
   *
   *
   * <pre>
   * The BigQuery location for the input content.
   * </pre>
   *
   * <code>.google.cloud.automl.v1beta1.BigQuerySource bigquery_source = 3;</code>
   */
  @java.lang.Override
  public com.google.cloud.automl.v1beta1.BigQuerySourceOrBuilder getBigquerySourceOrBuilder() {
    if (sourceCase_ == 3) {
      return (com.google.cloud.automl.v1beta1.BigQuerySource) source_;
    }
    return com.google.cloud.automl.v1beta1.BigQuerySource.getDefaultInstance();
  }

  public static final int PARAMS_FIELD_NUMBER = 2;

  private static final class ParamsDefaultEntryHolder {
    static final com.google.protobuf.MapEntry<java.lang.String, java.lang.String> defaultEntry =
        com.google.protobuf.MapEntry.<java.lang.String, java.lang.String>newDefaultInstance(
            com.google.cloud.automl.v1beta1.Io
                .internal_static_google_cloud_automl_v1beta1_InputConfig_ParamsEntry_descriptor,
            com.google.protobuf.WireFormat.FieldType.STRING,
            "",
            com.google.protobuf.WireFormat.FieldType.STRING,
            "");
  }

  @SuppressWarnings("serial")
  private com.google.protobuf.MapField<java.lang.String, java.lang.String> params_;

  private com.google.protobuf.MapField<java.lang.String, java.lang.String> internalGetParams() {
    if (params_ == null) {
      return com.google.protobuf.MapField.emptyMapField(ParamsDefaultEntryHolder.defaultEntry);
    }
    return params_;
  }

  public int getParamsCount() {
    return internalGetParams().getMap().size();
  }
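
  // Illustrative sketch for a Tables import (see the class comment): a BigQuery
  // source plus the schema_inference_version param. The table URI is a
  // placeholder.
  //
  //   InputConfig tablesConfig =
  //       InputConfig.newBuilder()
  //           .setBigquerySource(
  //               BigQuerySource.newBuilder().setInputUri("bq://project.dataset.table").build())
  //           .putParams("schema_inference_version", "1")
  //           .build();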
  /**
   *
   *
   * <pre>
   * Additional domain-specific parameters describing the semantic of the
   * imported data, any string must be up to 25000
   * characters long.
   *
   * *  For Tables:
   *    `schema_inference_version` - (integer) Required. The version of the
   *        algorithm that should be used for the initial inference of the
   *        schema (columns' DataTypes) of the table the data is being imported
   *        into. Allowed values: "1".
   * </pre>
   *
   * <code>map&lt;string, string&gt; params = 2;</code>
   */
  @java.lang.Override
  public boolean containsParams(java.lang.String key) {
    if (key == null) {
      throw new NullPointerException("map key");
    }
    return internalGetParams().getMap().containsKey(key);
  }

  /** Use {@link #getParamsMap()} instead. */
  @java.lang.Override
  @java.lang.Deprecated
  public java.util.Map<java.lang.String, java.lang.String> getParams() {
    return getParamsMap();
  }

  /**
   *
   *
   * <pre>
   * Additional domain-specific parameters describing the semantic of the
   * imported data, any string must be up to 25000
   * characters long.
   *
   * *  For Tables:
   *    `schema_inference_version` - (integer) Required. The version of the
   *        algorithm that should be used for the initial inference of the
   *        schema (columns' DataTypes) of the table the data is being imported
   *        into. Allowed values: "1".
   * </pre>
   *
   * <code>map&lt;string, string&gt; params = 2;</code>
   */
  @java.lang.Override
  public java.util.Map<java.lang.String, java.lang.String> getParamsMap() {
    return internalGetParams().getMap();
  }

  /**
   *
   *
   * <pre>
   * Additional domain-specific parameters describing the semantic of the
   * imported data, any string must be up to 25000
   * characters long.
   *
   * *  For Tables:
   *    `schema_inference_version` - (integer) Required. The version of the
   *        algorithm that should be used for the initial inference of the
   *        schema (columns' DataTypes) of the table the data is being imported
   *        into. Allowed values: "1".
   * </pre>
   *
   * <code>map&lt;string, string&gt; params = 2;</code>
   */
  @java.lang.Override
  public /* nullable */ java.lang.String getParamsOrDefault(
      java.lang.String key,
      /* nullable */
      java.lang.String defaultValue) {
    if (key == null) {
      throw new NullPointerException("map key");
    }
    java.util.Map<java.lang.String, java.lang.String> map = internalGetParams().getMap();
    return map.containsKey(key) ? map.get(key) : defaultValue;
  }
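
  // Sketch of reading a param back with a fallback ("config" is assumed to be a
  // built InputConfig):
  //
  //   String version = config.getParamsOrDefault("schema_inference_version", "1");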
  /**
   *
   *
   * <pre>
   * Additional domain-specific parameters describing the semantic of the
   * imported data, any string must be up to 25000
   * characters long.
   *
   * *  For Tables:
   *    `schema_inference_version` - (integer) Required. The version of the
   *        algorithm that should be used for the initial inference of the
   *        schema (columns' DataTypes) of the table the data is being imported
   *        into. Allowed values: "1".
   * </pre>
   *
   * <code>map&lt;string, string&gt; params = 2;</code>
   */
  @java.lang.Override
  public java.lang.String getParamsOrThrow(java.lang.String key) {
    if (key == null) {
      throw new NullPointerException("map key");
    }
    java.util.Map<java.lang.String, java.lang.String> map = internalGetParams().getMap();
    if (!map.containsKey(key)) {
      throw new java.lang.IllegalArgumentException();
    }
    return map.get(key);
  }

  private byte memoizedIsInitialized = -1;

  @java.lang.Override
  public final boolean isInitialized() {
    byte isInitialized = memoizedIsInitialized;
    if (isInitialized == 1) return true;
    if (isInitialized == 0) return false;

    memoizedIsInitialized = 1;
    return true;
  }

  @java.lang.Override
  public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException {
    if (sourceCase_ == 1) {
      output.writeMessage(1, (com.google.cloud.automl.v1beta1.GcsSource) source_);
    }
    com.google.protobuf.GeneratedMessageV3.serializeStringMapTo(
        output, internalGetParams(), ParamsDefaultEntryHolder.defaultEntry, 2);
    if (sourceCase_ == 3) {
      output.writeMessage(3, (com.google.cloud.automl.v1beta1.BigQuerySource) source_);
    }
    getUnknownFields().writeTo(output);
  }

  @java.lang.Override
  public int getSerializedSize() {
    int size = memoizedSize;
    if (size != -1) return size;

    size = 0;
    if (sourceCase_ == 1) {
      size +=
          com.google.protobuf.CodedOutputStream.computeMessageSize(
              1, (com.google.cloud.automl.v1beta1.GcsSource) source_);
    }
    for (java.util.Map.Entry<java.lang.String, java.lang.String> entry :
        internalGetParams().getMap().entrySet()) {
      com.google.protobuf.MapEntry<java.lang.String, java.lang.String> params__ =
          ParamsDefaultEntryHolder.defaultEntry
              .newBuilderForType()
              .setKey(entry.getKey())
              .setValue(entry.getValue())
              .build();
      size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, params__);
    }
    if (sourceCase_ == 3) {
      size +=
          com.google.protobuf.CodedOutputStream.computeMessageSize(
              3, (com.google.cloud.automl.v1beta1.BigQuerySource) source_);
    }
    size += getUnknownFields().getSerializedSize();
    memoizedSize = size;
    return size;
  }

  @java.lang.Override
  public boolean equals(final java.lang.Object obj) {
    if (obj == this) {
      return true;
    }
    if (!(obj instanceof com.google.cloud.automl.v1beta1.InputConfig)) {
      return super.equals(obj);
    }
    com.google.cloud.automl.v1beta1.InputConfig other =
        (com.google.cloud.automl.v1beta1.InputConfig) obj;

    if (!internalGetParams().equals(other.internalGetParams())) return false;
    if (!getSourceCase().equals(other.getSourceCase())) return false;
    switch (sourceCase_) {
      case 1:
        if (!getGcsSource().equals(other.getGcsSource())) return false;
        break;
      case 3:
        if (!getBigquerySource().equals(other.getBigquerySource())) return false;
        break;
      case 0:
      default:
    }
    if (!getUnknownFields().equals(other.getUnknownFields())) return false;
    return true;
  }

  @java.lang.Override
  public int hashCode() {
    if (memoizedHashCode != 0) {
      return memoizedHashCode;
    }
    int hash = 41;
    hash = (19 * hash) + getDescriptor().hashCode();
    if (!internalGetParams().getMap().isEmpty()) {
      hash = (37 * hash) + PARAMS_FIELD_NUMBER;
      hash = (53 * hash) + internalGetParams().hashCode();
    }
    switch (sourceCase_) {
      case 1:
        hash = (37 * hash) + GCS_SOURCE_FIELD_NUMBER;
        hash = (53 * hash) + getGcsSource().hashCode();
        break;
      case 3:
        hash = (37 * hash) + BIGQUERY_SOURCE_FIELD_NUMBER;
        hash = (53 * hash) + getBigquerySource().hashCode();
        break;
      case 0:
      default:
    }
    hash = (29 * hash) + getUnknownFields().hashCode();
    memoizedHashCode = hash;
    return hash;
  }

  public static com.google.cloud.automl.v1beta1.InputConfig parseFrom(java.nio.ByteBuffer data)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data);
  }

  public static com.google.cloud.automl.v1beta1.InputConfig parseFrom(
      java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data, extensionRegistry);
  }

  public static com.google.cloud.automl.v1beta1.InputConfig parseFrom(
      com.google.protobuf.ByteString data)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data);
  }

  public static com.google.cloud.automl.v1beta1.InputConfig parseFrom(
      com.google.protobuf.ByteString data,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data, extensionRegistry);
  }

  public static com.google.cloud.automl.v1beta1.InputConfig parseFrom(byte[] data)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data);
  }

  public static com.google.cloud.automl.v1beta1.InputConfig parseFrom(
      byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data, extensionRegistry);
  }

  public static com.google.cloud.automl.v1beta1.InputConfig parseFrom(java.io.InputStream input)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
  }

  public static com.google.cloud.automl.v1beta1.InputConfig parseFrom(
      java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
        PARSER, input, extensionRegistry);
  }

  public static com.google.cloud.automl.v1beta1.InputConfig parseDelimitedFrom(
      java.io.InputStream input) throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input);
  }

  public static com.google.cloud.automl.v1beta1.InputConfig parseDelimitedFrom(
      java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(
        PARSER, input, extensionRegistry);
  }

  public static com.google.cloud.automl.v1beta1.InputConfig parseFrom(
      com.google.protobuf.CodedInputStream input) throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
  }

  public static com.google.cloud.automl.v1beta1.InputConfig parseFrom(
      com.google.protobuf.CodedInputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
        PARSER, input, extensionRegistry);
  }

  @java.lang.Override
  public Builder newBuilderForType() {
    return newBuilder();
  }

  public static Builder newBuilder() {
    return DEFAULT_INSTANCE.toBuilder();
  }

  public static Builder newBuilder(com.google.cloud.automl.v1beta1.InputConfig prototype) {
    return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
  }

  @java.lang.Override
  public Builder toBuilder() {
    return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this);
  }

  @java.lang.Override
  protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
    Builder builder = new Builder(parent);
    return builder;
  }
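
  // The parseFrom overloads above are the inverse of writeTo/toByteArray; a
  // round-trip sketch ("config" is assumed to be a built InputConfig):
  //
  //   byte[] bytes = config.toByteArray();
  //   InputConfig copy = InputConfig.parseFrom(bytes);
  //   // copy.equals(config) holds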
  /**
   *
   *
   * <pre>
   * Input configuration for ImportData Action.
   *
   * The format of the input depends on the dataset_metadata of the Dataset
   * into which the import is happening. As input source the
   * [gcs_source][google.cloud.automl.v1beta1.InputConfig.gcs_source]
   * is expected, unless specified otherwise. Additionally any input .CSV file
   * by itself must be 100MB or smaller, unless specified otherwise.
   * If an "example" file (that is, image, video etc.) with identical content
   * (even if it had different GCS_FILE_PATH) is mentioned multiple times, then
   * its label, bounding boxes etc. are appended. The same file should always
   * be provided with the same ML_USE and GCS_FILE_PATH; if it is not, then
   * these values are nondeterministically selected from the given ones.
   *
   * The formats are represented in EBNF with commas being literal and with
   * non-terminal symbols defined near the end of this comment. The formats are:
   *
   *  *  For Image Classification:
   *         CSV file(s) with each line in format:
   *           ML_USE,GCS_FILE_PATH,LABEL,LABEL,...
   *           GCS_FILE_PATH leads to an image of up to 30MB in size. Supported
   *           extensions: .JPEG, .GIF, .PNG, .WEBP, .BMP, .TIFF, .ICO
   *           For MULTICLASS classification type, at most one LABEL is allowed
   *           per image. If an image has not yet been labeled, then it should be
   *           mentioned just once with no LABEL.
   *         Some sample rows:
   *           TRAIN,gs://folder/image1.jpg,daisy
   *           TEST,gs://folder/image2.jpg,dandelion,tulip,rose
   *           UNASSIGNED,gs://folder/image3.jpg,daisy
   *           UNASSIGNED,gs://folder/image4.jpg
   *
   *  *  For Image Object Detection:
   *         CSV file(s) with each line in format:
   *           ML_USE,GCS_FILE_PATH,(LABEL,BOUNDING_BOX | ,,,,,,,)
   *           GCS_FILE_PATH leads to an image of up to 30MB in size. Supported
   *           extensions: .JPEG, .GIF, .PNG.
   *           Each image is assumed to be exhaustively labeled. The minimum
   *           allowed BOUNDING_BOX edge length is 0.01, and no more than 500
   *           BOUNDING_BOX-es per image are allowed (one BOUNDING_BOX is defined
   *           per line). If an image has not yet been labeled, then it should be
   *           mentioned just once with no LABEL and the ",,,,,,," in place of the
   *           BOUNDING_BOX. Images which are known not to contain any
   *           bounding boxes should be labeled explicitly as
   *           "NEGATIVE_IMAGE", followed by ",,,,,,," in place of the
   *           BOUNDING_BOX.
   *         Sample rows:
   *           TRAIN,gs://folder/image1.png,car,0.1,0.1,,,0.3,0.3,,
   *           TRAIN,gs://folder/image1.png,bike,.7,.6,,,.8,.9,,
   *           UNASSIGNED,gs://folder/im2.png,car,0.1,0.1,0.2,0.1,0.2,0.3,0.1,0.3
   *           TEST,gs://folder/im3.png,,,,,,,,,
   *           TRAIN,gs://folder/im4.png,NEGATIVE_IMAGE,,,,,,,,,
   *
   *  *  For Video Classification:
   *         CSV file(s) with each line in format:
   *           ML_USE,GCS_FILE_PATH
   *           where the ML_USE VALIDATE value should not be used. The GCS_FILE_PATH
   *           should lead to another .csv file which describes examples that have
   *           given ML_USE, using the following row format:
   *           GCS_FILE_PATH,(LABEL,TIME_SEGMENT_START,TIME_SEGMENT_END | ,,)
   *           Here GCS_FILE_PATH leads to a video of up to 50GB in size and up
   *           to 3h duration. Supported extensions: .MOV, .MPEG4, .MP4, .AVI.
   *           TIME_SEGMENT_START and TIME_SEGMENT_END must be within the
   *           length of the video, and end has to be after the start. Any segment
   *           of a video which has one or more labels on it is considered a
   *           hard negative for all other labels. Any segment with no labels on
   *           it is considered to be unknown. If a whole video is unknown, then
   *           it should be mentioned just once with ",," in place of LABEL,
   *           TIME_SEGMENT_START,TIME_SEGMENT_END.
   *         Sample top level CSV file:
   *           TRAIN,gs://folder/train_videos.csv
   *           TEST,gs://folder/test_videos.csv
   *           UNASSIGNED,gs://folder/other_videos.csv
   *         Sample rows of a CSV file for a particular ML_USE:
   *           gs://folder/video1.avi,car,120,180.000021
   *           gs://folder/video1.avi,bike,150,180.000021
   *           gs://folder/vid2.avi,car,0,60.5
   *           gs://folder/vid3.avi,,,
   *
   *  *  For Video Object Tracking:
   *         CSV file(s) with each line in format:
   *           ML_USE,GCS_FILE_PATH
   *           where the ML_USE VALIDATE value should not be used. The GCS_FILE_PATH
   *           should lead to another .csv file which describes examples that have
   *           given ML_USE, using one of the following row formats:
   *           GCS_FILE_PATH,LABEL,[INSTANCE_ID],TIMESTAMP,BOUNDING_BOX
   *           or
   *           GCS_FILE_PATH,,,,,,,,,,
   *           Here GCS_FILE_PATH leads to a video of up to 50GB in size and up
   *           to 3h duration. Supported extensions: .MOV, .MPEG4, .MP4, .AVI.
   *           Providing INSTANCE_IDs can help to obtain a better model. When
   *           a specific labeled entity leaves the video frame and shows up
   *           again later, it is not required, albeit preferable, that the
   *           same INSTANCE_ID is given to it.
   *           TIMESTAMP must be within the length of the video; the
   *           BOUNDING_BOX is assumed to be drawn on the video frame closest
   *           to the TIMESTAMP. Any frame mentioned by a TIMESTAMP is expected
   *           to be exhaustively labeled and no more than 500 BOUNDING_BOX-es per
   *           frame are allowed. If a whole video is unknown, then it should be
   *           mentioned just once with ",,,,,,,,,," in place of LABEL,
   *           [INSTANCE_ID],TIMESTAMP,BOUNDING_BOX.
   *         Sample top level CSV file:
   *           TRAIN,gs://folder/train_videos.csv
   *           TEST,gs://folder/test_videos.csv
   *           UNASSIGNED,gs://folder/other_videos.csv
   *         Seven sample rows of a CSV file for a particular ML_USE:
   *           gs://folder/video1.avi,car,1,12.10,0.8,0.8,0.9,0.8,0.9,0.9,0.8,0.9
   *           gs://folder/video1.avi,car,1,12.90,0.4,0.8,0.5,0.8,0.5,0.9,0.4,0.9
   *           gs://folder/video1.avi,car,2,12.10,.4,.2,.5,.2,.5,.3,.4,.3
   *           gs://folder/video1.avi,car,2,12.90,.8,.2,,,.9,.3,,
   *           gs://folder/video1.avi,bike,,12.50,.45,.45,,,.55,.55,,
   *           gs://folder/video2.avi,car,1,0,.1,.9,,,.9,.1,,
   *           gs://folder/video2.avi,,,,,,,,,,,
   *  *  For Text Extraction:
   *         CSV file(s) with each line in format:
   *           ML_USE,GCS_FILE_PATH
   *           GCS_FILE_PATH leads to a .JSONL (that is, JSON Lines) file which
   *           either imports text in-line or as documents. Any given
   *           .JSONL file must be 100MB or smaller.
   *           The in-line .JSONL file contains, per line, a proto that wraps a
   *           TextSnippet proto (in JSON representation) followed by one or more
   *           AnnotationPayload protos (called annotations), which have
   *           display_name and text_extraction detail populated. The given text
   *           is expected to be annotated exhaustively, for example, if you look
   *           for animals and text contains "dolphin" that is not labeled, then
   *           "dolphin" is assumed to not be an animal. Any given text snippet
   *           content must be 10KB or smaller, and also be UTF-8 NFC encoded
   *           (ASCII already is).
   *           The document .JSONL file contains, per line, a proto that wraps a
   *           Document proto. The Document proto must have either document_text
   *           or input_config set. In document_text case, the Document proto may
   *           also contain the spatial information of the document, including
   *           layout, document dimension and page number. In input_config case,
   *           only PDF documents are supported now, and each document may be up
   *           to 2MB large. Currently, annotations on documents cannot be
   *           specified at import.
   *         Three sample CSV rows:
   *           TRAIN,gs://folder/file1.jsonl
   *           VALIDATE,gs://folder/file2.jsonl
   *           TEST,gs://folder/file3.jsonl
   *         Sample in-line JSON Lines file for entity extraction (presented here
   *         with artificial line breaks, but the only actual line break is
   *         denoted by \n).:
   *           {
   *             "document": {
   *               "document_text": {"content": "dog cat"}
   *               "layout": [
   *                 {
   *                   "text_segment": {
   *                     "start_offset": 0,
   *                     "end_offset": 3,
   *                   },
   *                   "page_number": 1,
   *                   "bounding_poly": {
   *                     "normalized_vertices": [
   *                       {"x": 0.1, "y": 0.1},
   *                       {"x": 0.1, "y": 0.3},
   *                       {"x": 0.3, "y": 0.3},
   *                       {"x": 0.3, "y": 0.1},
   *                     ],
   *                   },
   *                   "text_segment_type": TOKEN,
   *                 },
   *                 {
   *                   "text_segment": {
   *                     "start_offset": 4,
   *                     "end_offset": 7,
   *                   },
   *                   "page_number": 1,
   *                   "bounding_poly": {
   *                     "normalized_vertices": [
   *                       {"x": 0.4, "y": 0.1},
   *                       {"x": 0.4, "y": 0.3},
   *                       {"x": 0.8, "y": 0.3},
   *                       {"x": 0.8, "y": 0.1},
   *                     ],
   *                   },
   *                   "text_segment_type": TOKEN,
   *                 }
   *
   *               ],
   *               "document_dimensions": {
   *                 "width": 8.27,
   *                 "height": 11.69,
   *                 "unit": INCH,
   *               }
   *               "page_count": 1,
   *             },
   *             "annotations": [
   *               {
   *                 "display_name": "animal",
   *                 "text_extraction": {"text_segment": {"start_offset": 0,
   *                 "end_offset": 3}}
   *               },
   *               {
   *                 "display_name": "animal",
   *                 "text_extraction": {"text_segment": {"start_offset": 4,
   *                 "end_offset": 7}}
   *               }
   *             ],
   *           }\n
   *           {
   *              "text_snippet": {
   *                "content": "This dog is good."
   *              },
   *              "annotations": [
   *                {
   *                  "display_name": "animal",
   *                  "text_extraction": {
   *                    "text_segment": {"start_offset": 5, "end_offset": 8}
   *                  }
   *                }
   *              ]
   *           }
   *         Sample document JSON Lines file (presented here with artificial line
   *         breaks, but the only actual line break is denoted by \n).:
   *           {
   *             "document": {
   *               "input_config": {
   *                 "gcs_source": { "input_uris": [ "gs://folder/document1.pdf" ]
   *                 }
   *               }
   *             }
   *           }\n
   *           {
   *             "document": {
   *               "input_config": {
   *                 "gcs_source": { "input_uris": [ "gs://folder/document2.pdf" ]
   *                 }
   *               }
   *             }
   *           }
   *
   *  *  For Text Classification:
   *         CSV file(s) with each line in format:
   *           ML_USE,(TEXT_SNIPPET | GCS_FILE_PATH),LABEL,LABEL,...
   *           TEXT_SNIPPET and GCS_FILE_PATH are distinguished by a pattern. If
   *           the column content is a valid GCS file path, i.e. prefixed by
   *           "gs://", it will be treated as a GCS_FILE_PATH, else if the content
   *           is enclosed within double quotes (""), it is
   *           treated as a TEXT_SNIPPET. In the GCS_FILE_PATH case, the path
   *           must lead to a .txt file with UTF-8 encoding, for example,
   *           "gs://folder/content.txt", and the content in it is extracted
   *           as a text snippet. In the TEXT_SNIPPET case, the column content
   *           excluding quotes is treated as the text snippet to be imported. In
   *           both cases, the text snippet/file size must be within 128kB.
   *           Maximum 100 unique labels are allowed per CSV row.
   *         Sample rows:
   *           TRAIN,"They have bad food and very rude",RudeService,BadFood
   *           TRAIN,gs://folder/content.txt,SlowService
   *           TEST,"Typically always bad service there.",RudeService
   *           VALIDATE,"Stomach ache to go.",BadFood
   *
   *  *  For Text Sentiment:
   *         CSV file(s) with each line in format:
   *           ML_USE,(TEXT_SNIPPET | GCS_FILE_PATH),SENTIMENT
   *           TEXT_SNIPPET and GCS_FILE_PATH are distinguished by a pattern. If
   *           the column content is a valid GCS file path, that is, prefixed by
   *           "gs://", it is treated as a GCS_FILE_PATH, otherwise it is treated
   *           as a TEXT_SNIPPET. In the GCS_FILE_PATH case, the path
   *           must lead to a .txt file with UTF-8 encoding, for example,
   *           "gs://folder/content.txt", and the content in it is extracted
   *           as a text snippet. In the TEXT_SNIPPET case, the column content
   *           itself is treated as the text snippet to be imported. In both
   *           cases, the text snippet must be up to 500 characters long.
   *         Sample rows:
   *           TRAIN,"@freewrytin this is way too good for your product",2
   *           TRAIN,"I need this product so bad",3
   *           TEST,"Thank you for this product.",4
   *           VALIDATE,gs://folder/content.txt,2
   *
   *   *  For Tables:
   *         Either
   *         [gcs_source][google.cloud.automl.v1beta1.InputConfig.gcs_source] or
   *
   * [bigquery_source][google.cloud.automl.v1beta1.InputConfig.bigquery_source]
   *         can be used. All input is concatenated into a single
   *
   * [primary_table][google.cloud.automl.v1beta1.TablesDatasetMetadata.primary_table_name].
   *         For gcs_source:
   *           CSV file(s), where the first row of the first file is the header,
   *           containing unique column names. If the first row of a subsequent
   *           file is the same as the header, then it is also treated as a
   *           header. All other rows contain values for the corresponding
   *           columns.
   *           Each .CSV file by itself must be 10GB or smaller, and their total
   *           size must be 100GB or smaller.
   *           First three sample rows of a CSV file:
   *           "Id","First Name","Last Name","Dob","Addresses"
   *
   * "1","John","Doe","1968-01-22","[{"status":"current","address":"123_First_Avenue","city":"Seattle","state":"WA","zip":"11111","numberOfYears":"1"},{"status":"previous","address":"456_Main_Street","city":"Portland","state":"OR","zip":"22222","numberOfYears":"5"}]"
   *
   * "2","Jane","Doe","1980-10-16","[{"status":"current","address":"789_Any_Avenue","city":"Albany","state":"NY","zip":"33333","numberOfYears":"2"},{"status":"previous","address":"321_Main_Street","city":"Hoboken","state":"NJ","zip":"44444","numberOfYears":"3"}]"
   *         For bigquery_source:
   *           A URI of a BigQuery table. The user data size of the BigQuery
   *           table must be 100GB or smaller.
   *         An imported table must have between 2 and 1,000 columns, inclusive,
   *         and between 1000 and 100,000,000 rows, inclusive. At most 5
   *         import data operations can run in parallel.
   *  Definitions:
   *  ML_USE = "TRAIN" | "VALIDATE" | "TEST" | "UNASSIGNED"
   *           Describes how the given example (file) should be used for model
   *           training. "UNASSIGNED" can be used when the user has no preference.
   *  GCS_FILE_PATH = A path to file on GCS, e.g. "gs://folder/image1.png".
   *  LABEL = A display name of an object on an image, video etc., e.g. "dog".
   *          Must be up to 32 characters long and can consist only of ASCII
   *          Latin letters A-Z and a-z, underscores (_), and ASCII digits 0-9.
   *          For each label an AnnotationSpec is created whose display_name
   *          becomes the label; AnnotationSpecs are given back in predictions.
   *  INSTANCE_ID = A positive integer that identifies a specific instance of a
   *                labeled entity on an example. Used e.g. to track two cars on
   *                a video while being able to tell apart which one is which.
   *  BOUNDING_BOX = VERTEX,VERTEX,VERTEX,VERTEX | VERTEX,,,VERTEX,,
   *                 A rectangle parallel to the frame of the example (image,
   *                 video). If 4 vertices are given they are connected by edges
   *                 in the order provided, if 2 are given they are recognized
   *                 as diagonally opposite vertices of the rectangle.
   *  VERTEX = COORDINATE,COORDINATE
   *           First coordinate is horizontal (x), the second is vertical (y).
   *  COORDINATE = A float in the 0 to 1 range, relative to total length of
   *               image or video in given dimension. For fractions the
   *               leading non-decimal 0 can be omitted (i.e. 0.3 = .3).
   *               Point 0,0 is in top left.
   *  TIME_SEGMENT_START = TIME_OFFSET
   *                       Expresses a beginning, inclusive, of a time segment
   *                       within an example that has a time dimension
   *                       (e.g. video).
   *  TIME_SEGMENT_END = TIME_OFFSET
   *                     Expresses an end, exclusive, of a time segment within
   *                     an example that has a time dimension (e.g. video).
   *  TIME_OFFSET = A number of seconds as measured from the start of an
   *                example (e.g. video). Fractions are allowed, up to a
   *                microsecond precision. "inf" is allowed, and it means the end
   *                of the example.
   *  TEXT_SNIPPET = The content of a text snippet, UTF-8 encoded, enclosed within
   *                 double quotes ("").
   *  SENTIMENT = An integer between 0 and
   *              Dataset.text_sentiment_dataset_metadata.sentiment_max
   *              (inclusive). Describes the ordinal of the sentiment - higher
   *              value means a more positive sentiment. All the values are
   *              completely relative, i.e. neither 0 needs to mean a negative or
   *              neutral sentiment nor sentiment_max needs to mean a positive one
   *              - it is just required that 0 is the least positive sentiment
   *              in the data, and sentiment_max is the most positive one.
   *              The SENTIMENT shouldn't be confused with "score" or "magnitude"
   *              from the previous Natural Language Sentiment Analysis API.
   *              All SENTIMENT values between 0 and sentiment_max must be
   *              represented in the imported data. On prediction the same 0 to
   *              sentiment_max range will be used. The difference between
   *              neighboring sentiment values need not be uniform, e.g. 1 and
   *              2 may be similar whereas the difference between 2 and 3 may be
   *              huge.
   *
   *  Errors:
   *  If any of the provided CSV files can't be parsed or if more than a certain
   *  percent of CSV rows cannot be processed, then the operation fails and
   *  nothing is imported. Regardless of overall success or failure, the per-row
   *  failures, up to a certain count cap, are listed in
   *  Operation.metadata.partial_failures.
   * </pre>
   *
   * Protobuf type {@code google.cloud.automl.v1beta1.InputConfig}
   */
  public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder<Builder>
      implements
      // @@protoc_insertion_point(builder_implements:google.cloud.automl.v1beta1.InputConfig)
      com.google.cloud.automl.v1beta1.InputConfigOrBuilder {
    public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
      return com.google.cloud.automl.v1beta1.Io
          .internal_static_google_cloud_automl_v1beta1_InputConfig_descriptor;
    }

    @SuppressWarnings({"rawtypes"})
    protected com.google.protobuf.MapFieldReflectionAccessor internalGetMapFieldReflection(
        int number) {
      switch (number) {
        case 2:
          return internalGetParams();
        default:
          throw new RuntimeException("Invalid map field number: " + number);
      }
    }

    @SuppressWarnings({"rawtypes"})
    protected com.google.protobuf.MapFieldReflectionAccessor internalGetMutableMapFieldReflection(
        int number) {
      switch (number) {
        case 2:
          return internalGetMutableParams();
        default:
          throw new RuntimeException("Invalid map field number: " + number);
      }
    }

    @java.lang.Override
    protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return com.google.cloud.automl.v1beta1.Io
          .internal_static_google_cloud_automl_v1beta1_InputConfig_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              com.google.cloud.automl.v1beta1.InputConfig.class,
              com.google.cloud.automl.v1beta1.InputConfig.Builder.class);
    }

    // Construct using com.google.cloud.automl.v1beta1.InputConfig.newBuilder()
    private Builder() {}

    private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
      super(parent);
    }

    @java.lang.Override
    public Builder clear() {
      super.clear();
      bitField0_ = 0;
      if (gcsSourceBuilder_ != null) {
        gcsSourceBuilder_.clear();
      }
      if (bigquerySourceBuilder_ != null) {
        bigquerySourceBuilder_.clear();
      }
      internalGetMutableParams().clear();
      sourceCase_ = 0;
      source_ = null;
      return this;
    }

    @java.lang.Override
    public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() {
      return com.google.cloud.automl.v1beta1.Io
          .internal_static_google_cloud_automl_v1beta1_InputConfig_descriptor;
    }

    @java.lang.Override
    public com.google.cloud.automl.v1beta1.InputConfig getDefaultInstanceForType() {
      return com.google.cloud.automl.v1beta1.InputConfig.getDefaultInstance();
    }

    @java.lang.Override
    public com.google.cloud.automl.v1beta1.InputConfig build() {
      com.google.cloud.automl.v1beta1.InputConfig result = buildPartial();
      if (!result.isInitialized()) {
        throw newUninitializedMessageException(result);
      }
      return result;
    }

    @java.lang.Override
    public com.google.cloud.automl.v1beta1.InputConfig buildPartial() {
      com.google.cloud.automl.v1beta1.InputConfig result =
          new com.google.cloud.automl.v1beta1.InputConfig(this);
      if (bitField0_ != 0) {
        buildPartial0(result);
      }
      buildPartialOneofs(result);
      onBuilt();
      return result;
    }

    private void buildPartial0(com.google.cloud.automl.v1beta1.InputConfig result) {
      int from_bitField0_ = bitField0_;
      if (((from_bitField0_ & 0x00000004) != 0)) {
        result.params_ = internalGetParams();
        result.params_.makeImmutable();
      }
    }

    private void buildPartialOneofs(com.google.cloud.automl.v1beta1.InputConfig result) {
      result.sourceCase_ = sourceCase_;
      result.source_ = this.source_;
      if (sourceCase_ == 1 && gcsSourceBuilder_ != null) {
        result.source_ = gcsSourceBuilder_.build();
      }
      if (sourceCase_ == 3 && bigquerySourceBuilder_ != null) {
        result.source_ = bigquerySourceBuilder_.build();
      }
    }

    @java.lang.Override
    public Builder clone() {
      return super.clone();
    }

    @java.lang.Override
    public Builder setField(
        com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
      return super.setField(field, value);
    }

    @java.lang.Override
    public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) {
      return super.clearField(field);
    }

    @java.lang.Override
    public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) {
      return super.clearOneof(oneof);
    }

    @java.lang.Override
    public Builder setRepeatedField(
        com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) {
      return super.setRepeatedField(field, index, value);
    }

    @java.lang.Override
    public Builder addRepeatedField(
        com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
      return super.addRepeatedField(field, value);
    }

    @java.lang.Override
    public Builder mergeFrom(com.google.protobuf.Message other) {
      if (other instanceof com.google.cloud.automl.v1beta1.InputConfig) {
        return mergeFrom((com.google.cloud.automl.v1beta1.InputConfig) other);
      } else {
        super.mergeFrom(other);
        return this;
      }
    }

    public Builder mergeFrom(com.google.cloud.automl.v1beta1.InputConfig other) {
      if (other == com.google.cloud.automl.v1beta1.InputConfig.getDefaultInstance()) return this;
      internalGetMutableParams().mergeFrom(other.internalGetParams());
      bitField0_ |= 0x00000004;
      switch (other.getSourceCase()) {
        case GCS_SOURCE:
          {
            mergeGcsSource(other.getGcsSource());
            break;
          }
        case BIGQUERY_SOURCE:
          {
            mergeBigquerySource(other.getBigquerySource());
            break;
          }
        case SOURCE_NOT_SET:
          {
            break;
          }
      }
      this.mergeUnknownFields(other.getUnknownFields());
      onChanged();
      return this;
    }

    @java.lang.Override
    public final boolean isInitialized() {
      return true;
    }

    @java.lang.Override
    public Builder mergeFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      if (extensionRegistry == null) {
        throw new java.lang.NullPointerException();
      }
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            case 10:
              {
                input.readMessage(getGcsSourceFieldBuilder().getBuilder(), extensionRegistry);
                sourceCase_ = 1;
                break;
              } // case 10
            case 18:
              {
                com.google.protobuf.MapEntry<java.lang.String, java.lang.String> params__ =
                    input.readMessage(
                        ParamsDefaultEntryHolder.defaultEntry.getParserForType(),
                        extensionRegistry);
                internalGetMutableParams()
                    .getMutableMap()
                    .put(params__.getKey(), params__.getValue());
                bitField0_ |= 0x00000004;
                break;
              } // case 18
            case 26:
              {
                input.readMessage(getBigquerySourceFieldBuilder().getBuilder(), extensionRegistry);
                sourceCase_ = 3;
                break;
              } // case 26
            default:
              {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
          } // switch (tag)
        } // while (!done)
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.unwrapIOException();
      } finally {
        onChanged();
      } // finally
      return this;
    }

    private int sourceCase_ = 0;
    private java.lang.Object source_;

    public SourceCase getSourceCase() {
      return SourceCase.forNumber(sourceCase_);
    }

    public Builder clearSource() {
      sourceCase_ = 0;
      source_ = null;
      onChanged();
      return this;
    }

    private int bitField0_;

    private com.google.protobuf.SingleFieldBuilderV3<
            com.google.cloud.automl.v1beta1.GcsSource,
            com.google.cloud.automl.v1beta1.GcsSource.Builder,
            com.google.cloud.automl.v1beta1.GcsSourceOrBuilder>
        gcsSourceBuilder_;
     * The Google Cloud Storage location for the input content.
     * In ImportData, the gcs_source points to a CSV file with the structure
     * described in the comment.
     * 
* * .google.cloud.automl.v1beta1.GcsSource gcs_source = 1; * * @return Whether the gcsSource field is set. */ @java.lang.Override public boolean hasGcsSource() { return sourceCase_ == 1; } /** * * *
     * The Google Cloud Storage location for the input content.
     * In ImportData, the gcs_source points to a CSV file with the structure
     * described in the comment.
     * 
* * .google.cloud.automl.v1beta1.GcsSource gcs_source = 1; * * @return The gcsSource. */ @java.lang.Override public com.google.cloud.automl.v1beta1.GcsSource getGcsSource() { if (gcsSourceBuilder_ == null) { if (sourceCase_ == 1) { return (com.google.cloud.automl.v1beta1.GcsSource) source_; } return com.google.cloud.automl.v1beta1.GcsSource.getDefaultInstance(); } else { if (sourceCase_ == 1) { return gcsSourceBuilder_.getMessage(); } return com.google.cloud.automl.v1beta1.GcsSource.getDefaultInstance(); } } /** * * *
     * The Google Cloud Storage location for the input content.
     * In ImportData, the gcs_source points to a CSV file with the structure
     * described in the comment.
     * 
* * .google.cloud.automl.v1beta1.GcsSource gcs_source = 1; */ public Builder setGcsSource(com.google.cloud.automl.v1beta1.GcsSource value) { if (gcsSourceBuilder_ == null) { if (value == null) { throw new NullPointerException(); } source_ = value; onChanged(); } else { gcsSourceBuilder_.setMessage(value); } sourceCase_ = 1; return this; } /** * * *
     * The Google Cloud Storage location for the input content.
     * In ImportData, the gcs_source points to a CSV file with the structure
     * described in the comment.
     * 
* * .google.cloud.automl.v1beta1.GcsSource gcs_source = 1; */ public Builder setGcsSource(com.google.cloud.automl.v1beta1.GcsSource.Builder builderForValue) { if (gcsSourceBuilder_ == null) { source_ = builderForValue.build(); onChanged(); } else { gcsSourceBuilder_.setMessage(builderForValue.build()); } sourceCase_ = 1; return this; } /** * * *
     * The Google Cloud Storage location for the input content.
     * In ImportData, the gcs_source points to a CSV file with the structure
     * described in the comment.
     * 
* * .google.cloud.automl.v1beta1.GcsSource gcs_source = 1; */ public Builder mergeGcsSource(com.google.cloud.automl.v1beta1.GcsSource value) { if (gcsSourceBuilder_ == null) { if (sourceCase_ == 1 && source_ != com.google.cloud.automl.v1beta1.GcsSource.getDefaultInstance()) { source_ = com.google.cloud.automl.v1beta1.GcsSource.newBuilder( (com.google.cloud.automl.v1beta1.GcsSource) source_) .mergeFrom(value) .buildPartial(); } else { source_ = value; } onChanged(); } else { if (sourceCase_ == 1) { gcsSourceBuilder_.mergeFrom(value); } else { gcsSourceBuilder_.setMessage(value); } } sourceCase_ = 1; return this; } /** * * *
     * The Google Cloud Storage location for the input content.
     * In ImportData, the gcs_source points to a CSV file with the structure
     * described in the comment.
     * 
* * .google.cloud.automl.v1beta1.GcsSource gcs_source = 1; */ public Builder clearGcsSource() { if (gcsSourceBuilder_ == null) { if (sourceCase_ == 1) { sourceCase_ = 0; source_ = null; onChanged(); } } else { if (sourceCase_ == 1) { sourceCase_ = 0; source_ = null; } gcsSourceBuilder_.clear(); } return this; } /** * * *
     * The Google Cloud Storage location for the input content.
     * In ImportData, the gcs_source points to a CSV file with the structure
     * described in the comment.
     * 
* * .google.cloud.automl.v1beta1.GcsSource gcs_source = 1; */ public com.google.cloud.automl.v1beta1.GcsSource.Builder getGcsSourceBuilder() { return getGcsSourceFieldBuilder().getBuilder(); } /** * * *
     * The Google Cloud Storage location for the input content.
     * In ImportData, the gcs_source points to a CSV file with the structure
     * described in the comment.
     * 
* * .google.cloud.automl.v1beta1.GcsSource gcs_source = 1; */ @java.lang.Override public com.google.cloud.automl.v1beta1.GcsSourceOrBuilder getGcsSourceOrBuilder() { if ((sourceCase_ == 1) && (gcsSourceBuilder_ != null)) { return gcsSourceBuilder_.getMessageOrBuilder(); } else { if (sourceCase_ == 1) { return (com.google.cloud.automl.v1beta1.GcsSource) source_; } return com.google.cloud.automl.v1beta1.GcsSource.getDefaultInstance(); } } /** * * *
     * The Google Cloud Storage location for the input content.
     * In ImportData, the gcs_source points to a CSV file with the structure
     * described in the comment.
     * 
* * .google.cloud.automl.v1beta1.GcsSource gcs_source = 1; */ private com.google.protobuf.SingleFieldBuilderV3< com.google.cloud.automl.v1beta1.GcsSource, com.google.cloud.automl.v1beta1.GcsSource.Builder, com.google.cloud.automl.v1beta1.GcsSourceOrBuilder> getGcsSourceFieldBuilder() { if (gcsSourceBuilder_ == null) { if (!(sourceCase_ == 1)) { source_ = com.google.cloud.automl.v1beta1.GcsSource.getDefaultInstance(); } gcsSourceBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< com.google.cloud.automl.v1beta1.GcsSource, com.google.cloud.automl.v1beta1.GcsSource.Builder, com.google.cloud.automl.v1beta1.GcsSourceOrBuilder>( (com.google.cloud.automl.v1beta1.GcsSource) source_, getParentForChildren(), isClean()); source_ = null; } sourceCase_ = 1; onChanged(); return gcsSourceBuilder_; } private com.google.protobuf.SingleFieldBuilderV3< com.google.cloud.automl.v1beta1.BigQuerySource, com.google.cloud.automl.v1beta1.BigQuerySource.Builder, com.google.cloud.automl.v1beta1.BigQuerySourceOrBuilder> bigquerySourceBuilder_; /** * * *
     * The BigQuery location for the input content.
     * 
* * .google.cloud.automl.v1beta1.BigQuerySource bigquery_source = 3; * * @return Whether the bigquerySource field is set. */ @java.lang.Override public boolean hasBigquerySource() { return sourceCase_ == 3; } /** * * *
     * The BigQuery location for the input content.
     * 
* * .google.cloud.automl.v1beta1.BigQuerySource bigquery_source = 3; * * @return The bigquerySource. */ @java.lang.Override public com.google.cloud.automl.v1beta1.BigQuerySource getBigquerySource() { if (bigquerySourceBuilder_ == null) { if (sourceCase_ == 3) { return (com.google.cloud.automl.v1beta1.BigQuerySource) source_; } return com.google.cloud.automl.v1beta1.BigQuerySource.getDefaultInstance(); } else { if (sourceCase_ == 3) { return bigquerySourceBuilder_.getMessage(); } return com.google.cloud.automl.v1beta1.BigQuerySource.getDefaultInstance(); } } /** * * *
     * The BigQuery location for the input content.
     * 
* * .google.cloud.automl.v1beta1.BigQuerySource bigquery_source = 3; */ public Builder setBigquerySource(com.google.cloud.automl.v1beta1.BigQuerySource value) { if (bigquerySourceBuilder_ == null) { if (value == null) { throw new NullPointerException(); } source_ = value; onChanged(); } else { bigquerySourceBuilder_.setMessage(value); } sourceCase_ = 3; return this; } /** * * *
     * The BigQuery location for the input content.
     * 
* * .google.cloud.automl.v1beta1.BigQuerySource bigquery_source = 3; */ public Builder setBigquerySource( com.google.cloud.automl.v1beta1.BigQuerySource.Builder builderForValue) { if (bigquerySourceBuilder_ == null) { source_ = builderForValue.build(); onChanged(); } else { bigquerySourceBuilder_.setMessage(builderForValue.build()); } sourceCase_ = 3; return this; } /** * * *
     * The BigQuery location for the input content.
     * 
* * .google.cloud.automl.v1beta1.BigQuerySource bigquery_source = 3; */ public Builder mergeBigquerySource(com.google.cloud.automl.v1beta1.BigQuerySource value) { if (bigquerySourceBuilder_ == null) { if (sourceCase_ == 3 && source_ != com.google.cloud.automl.v1beta1.BigQuerySource.getDefaultInstance()) { source_ = com.google.cloud.automl.v1beta1.BigQuerySource.newBuilder( (com.google.cloud.automl.v1beta1.BigQuerySource) source_) .mergeFrom(value) .buildPartial(); } else { source_ = value; } onChanged(); } else { if (sourceCase_ == 3) { bigquerySourceBuilder_.mergeFrom(value); } else { bigquerySourceBuilder_.setMessage(value); } } sourceCase_ = 3; return this; } /** * * *
     * The BigQuery location for the input content.
     * 
* * .google.cloud.automl.v1beta1.BigQuerySource bigquery_source = 3; */ public Builder clearBigquerySource() { if (bigquerySourceBuilder_ == null) { if (sourceCase_ == 3) { sourceCase_ = 0; source_ = null; onChanged(); } } else { if (sourceCase_ == 3) { sourceCase_ = 0; source_ = null; } bigquerySourceBuilder_.clear(); } return this; } /** * * *
     * The BigQuery location for the input content.
     * 
* * .google.cloud.automl.v1beta1.BigQuerySource bigquery_source = 3; */ public com.google.cloud.automl.v1beta1.BigQuerySource.Builder getBigquerySourceBuilder() { return getBigquerySourceFieldBuilder().getBuilder(); } /** * * *
     * The BigQuery location for the input content.
     * 
* * .google.cloud.automl.v1beta1.BigQuerySource bigquery_source = 3; */ @java.lang.Override public com.google.cloud.automl.v1beta1.BigQuerySourceOrBuilder getBigquerySourceOrBuilder() { if ((sourceCase_ == 3) && (bigquerySourceBuilder_ != null)) { return bigquerySourceBuilder_.getMessageOrBuilder(); } else { if (sourceCase_ == 3) { return (com.google.cloud.automl.v1beta1.BigQuerySource) source_; } return com.google.cloud.automl.v1beta1.BigQuerySource.getDefaultInstance(); } } /** * * *
     * The BigQuery location for the input content.
     * 
* * .google.cloud.automl.v1beta1.BigQuerySource bigquery_source = 3; */ private com.google.protobuf.SingleFieldBuilderV3< com.google.cloud.automl.v1beta1.BigQuerySource, com.google.cloud.automl.v1beta1.BigQuerySource.Builder, com.google.cloud.automl.v1beta1.BigQuerySourceOrBuilder> getBigquerySourceFieldBuilder() { if (bigquerySourceBuilder_ == null) { if (!(sourceCase_ == 3)) { source_ = com.google.cloud.automl.v1beta1.BigQuerySource.getDefaultInstance(); } bigquerySourceBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< com.google.cloud.automl.v1beta1.BigQuerySource, com.google.cloud.automl.v1beta1.BigQuerySource.Builder, com.google.cloud.automl.v1beta1.BigQuerySourceOrBuilder>( (com.google.cloud.automl.v1beta1.BigQuerySource) source_, getParentForChildren(), isClean()); source_ = null; } sourceCase_ = 3; onChanged(); return bigquerySourceBuilder_; } private com.google.protobuf.MapField params_; private com.google.protobuf.MapField internalGetParams() { if (params_ == null) { return com.google.protobuf.MapField.emptyMapField(ParamsDefaultEntryHolder.defaultEntry); } return params_; } private com.google.protobuf.MapField internalGetMutableParams() { if (params_ == null) { params_ = com.google.protobuf.MapField.newMapField(ParamsDefaultEntryHolder.defaultEntry); } if (!params_.isMutable()) { params_ = params_.copy(); } bitField0_ |= 0x00000004; onChanged(); return params_; } public int getParamsCount() { return internalGetParams().getMap().size(); } /** * * *
     * Additional domain-specific parameters describing the semantics of the
     * imported data; any string must be up to 25000
     * characters long.
     *
     * *  For Tables:
     *    `schema_inference_version` - (integer) Required. The version of the
     *        algorithm that should be used for the initial inference of the
     *        schema (columns' DataTypes) of the table into which the data is
     *        being imported. Allowed values: "1".
     * 
* * map<string, string> params = 2; */ @java.lang.Override public boolean containsParams(java.lang.String key) { if (key == null) { throw new NullPointerException("map key"); } return internalGetParams().getMap().containsKey(key); } /** Use {@link #getParamsMap()} instead. */ @java.lang.Override @java.lang.Deprecated public java.util.Map getParams() { return getParamsMap(); } /** * * *
     * Additional domain-specific parameters describing the semantics of the
     * imported data; any string must be up to 25000
     * characters long.
     *
     * *  For Tables:
     *    `schema_inference_version` - (integer) Required. The version of the
     *        algorithm that should be used for the initial inference of the
     *        schema (columns' DataTypes) of the table into which the data is
     *        being imported. Allowed values: "1".
     * 
* * map<string, string> params = 2; */ @java.lang.Override public java.util.Map getParamsMap() { return internalGetParams().getMap(); } /** * * *
     * Additional domain-specific parameters describing the semantics of the
     * imported data; any string must be up to 25000
     * characters long.
     *
     * *  For Tables:
     *    `schema_inference_version` - (integer) Required. The version of the
     *        algorithm that should be used for the initial inference of the
     *        schema (columns' DataTypes) of the table into which the data is
     *        being imported. Allowed values: "1".
     * 
* * map<string, string> params = 2; */ @java.lang.Override public /* nullable */ java.lang.String getParamsOrDefault( java.lang.String key, /* nullable */ java.lang.String defaultValue) { if (key == null) { throw new NullPointerException("map key"); } java.util.Map map = internalGetParams().getMap(); return map.containsKey(key) ? map.get(key) : defaultValue; } /** * * *
     * Additional domain-specific parameters describing the semantics of the
     * imported data; any string must be up to 25000
     * characters long.
     *
     * *  For Tables:
     *    `schema_inference_version` - (integer) Required. The version of the
     *        algorithm that should be used for the initial inference of the
     *        schema (columns' DataTypes) of the table into which the data is
     *        being imported. Allowed values: "1".
     * 
* * map<string, string> params = 2; */ @java.lang.Override public java.lang.String getParamsOrThrow(java.lang.String key) { if (key == null) { throw new NullPointerException("map key"); } java.util.Map map = internalGetParams().getMap(); if (!map.containsKey(key)) { throw new java.lang.IllegalArgumentException(); } return map.get(key); } public Builder clearParams() { bitField0_ = (bitField0_ & ~0x00000004); internalGetMutableParams().getMutableMap().clear(); return this; } /** * * *
     * Additional domain-specific parameters describing the semantics of the
     * imported data; any string must be up to 25000
     * characters long.
     *
     * *  For Tables:
     *    `schema_inference_version` - (integer) Required. The version of the
     *        algorithm that should be used for the initial inference of the
     *        schema (columns' DataTypes) of the table into which the data is
     *        being imported. Allowed values: "1".
     * 
* * map<string, string> params = 2; */ public Builder removeParams(java.lang.String key) { if (key == null) { throw new NullPointerException("map key"); } internalGetMutableParams().getMutableMap().remove(key); return this; } /** Use alternate mutation accessors instead. */ @java.lang.Deprecated public java.util.Map getMutableParams() { bitField0_ |= 0x00000004; return internalGetMutableParams().getMutableMap(); } /** * * *
     * Additional domain-specific parameters describing the semantics of the
     * imported data; any string must be up to 25000
     * characters long.
     *
     * *  For Tables:
     *    `schema_inference_version` - (integer) Required. The version of the
     *        algorithm that should be used for the initial inference of the
     *        schema (columns' DataTypes) of the table into which the data is
     *        being imported. Allowed values: "1".
     * 
* * map<string, string> params = 2; */ public Builder putParams(java.lang.String key, java.lang.String value) { if (key == null) { throw new NullPointerException("map key"); } if (value == null) { throw new NullPointerException("map value"); } internalGetMutableParams().getMutableMap().put(key, value); bitField0_ |= 0x00000004; return this; } /** * * *
     * Additional domain-specific parameters describing the semantics of the
     * imported data; any string must be up to 25000
     * characters long.
     *
     * *  For Tables:
     *    `schema_inference_version` - (integer) Required. The version of the
     *        algorithm that should be used for the initial inference of the
     *        schema (columns' DataTypes) of the table into which the data is
     *        being imported. Allowed values: "1".
     * 
* * map<string, string> params = 2; */ public Builder putAllParams(java.util.Map values) { internalGetMutableParams().getMutableMap().putAll(values); bitField0_ |= 0x00000004; return this; } @java.lang.Override public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final com.google.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:google.cloud.automl.v1beta1.InputConfig) } // @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.InputConfig) private static final com.google.cloud.automl.v1beta1.InputConfig DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new com.google.cloud.automl.v1beta1.InputConfig(); } public static com.google.cloud.automl.v1beta1.InputConfig getDefaultInstance() { return DEFAULT_INSTANCE; } private static final com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { @java.lang.Override public InputConfig parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { Builder builder = newBuilder(); try { builder.mergeFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(builder.buildPartial()); } catch (com.google.protobuf.UninitializedMessageException e) { throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException(e) .setUnfinishedMessage(builder.buildPartial()); } return builder.buildPartial(); } }; public static com.google.protobuf.Parser parser() { return PARSER; } @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public com.google.cloud.automl.v1beta1.InputConfig getDefaultInstanceForType() { return DEFAULT_INSTANCE; } }
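
For reference, a minimal usage sketch of the generated message and its Builder. This is an illustration, not part of the generated file; it assumes the companion GcsSource message (defined alongside InputConfig but not shown in this listing) exposes the standard generated addInputUris(String) accessor for a repeated input_uris field.

import com.google.cloud.automl.v1beta1.GcsSource;
import com.google.cloud.automl.v1beta1.InputConfig;

public class InputConfigExample {
  public static void main(String[] args) throws Exception {
    // Build a GCS-backed InputConfig. gcs_source (field 1) and
    // bigquery_source (field 3) share the "source" oneof, so setting one
    // replaces the other.
    InputConfig config =
        InputConfig.newBuilder()
            .setGcsSource(
                GcsSource.newBuilder()
                    // Hypothetical URI, for illustration only.
                    .addInputUris("gs://example-bucket/import.csv")
                    .build())
            // For Tables imports, schema_inference_version is required and
            // "1" is the only allowed value per the params Javadoc above.
            .putParams("schema_inference_version", "1")
            .build();

    // The oneof case reports which source was populated.
    if (config.getSourceCase() != InputConfig.SourceCase.GCS_SOURCE
        || !config.hasGcsSource()) {
      throw new IllegalStateException("expected gcs_source to be set");
    }

    // Round-trip through the wire format using the generated parser.
    InputConfig parsed = InputConfig.parseFrom(config.toByteArray());
    if (!parsed.equals(config)) {
      throw new IllegalStateException("round-trip mismatch");
    }
  }
}

Because the two sources occupy the same oneof, a later setBigquerySource(...) call on the builder would switch the case to BIGQUERY_SOURCE and drop the GCS value, and clearSource() resets the oneof entirely, exactly as the Builder code above implements.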