/*
 * Copyright 2024 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: google/cloud/automl/v1beta1/image.proto

// Protobuf Java Version: 3.25.5
package com.google.cloud.automl.v1beta1;

public interface ImageClassificationModelMetadataOrBuilder
    extends
    // @@protoc_insertion_point(interface_extends:google.cloud.automl.v1beta1.ImageClassificationModelMetadata)
    com.google.protobuf.MessageOrBuilder {

  /**
   *
   *
   * <pre>
   * Optional. The ID of the `base` model. If it is specified, the new model
   * will be created based on the `base` model. Otherwise, the new model will be
   * created from scratch. The `base` model must be in the same
   * `project` and `location` as the new model to create, and have the same
   * `model_type`.
   * </pre>
   *
   * <code>string base_model_id = 1;</code>
   *
   * @return The baseModelId.
   */
  java.lang.String getBaseModelId();

  /**
   *
   *
   * <pre>
   * Optional. The ID of the `base` model. If it is specified, the new model
   * will be created based on the `base` model. Otherwise, the new model will be
   * created from scratch. The `base` model must be in the same
   * `project` and `location` as the new model to create, and have the same
   * `model_type`.
   * </pre>
   *
   * <code>string base_model_id = 1;</code>
   *
   * @return The bytes for baseModelId.
   */
  com.google.protobuf.ByteString getBaseModelIdBytes();

  /**
   *
   *
   * <pre>
   * Required. The train budget of creating this model, expressed in hours. The
   * actual `train_cost` will be equal or less than this value.
   * </pre>
   *
   * <code>int64 train_budget = 2;</code>
   *
   * @return The trainBudget.
   */
  long getTrainBudget();

  /**
   *
   *
   * <pre>
   * Output only. The actual train cost of creating this model, expressed in
   * hours. If this model is created from a `base` model, the train cost used
   * to create the `base` model is not included.
   * </pre>
   *
   * <code>int64 train_cost = 3;</code>
   *
   * @return The trainCost.
   */
  long getTrainCost();

  /**
   *
   *
   * <pre>
   * Output only. The reason that this create model operation stopped,
   * e.g. `BUDGET_REACHED`, `MODEL_CONVERGED`.
   * </pre>
   *
   * <code>string stop_reason = 5;</code>
   *
   * @return The stopReason.
   */
  java.lang.String getStopReason();

  /**
   *
   *
   * <pre>
   * Output only. The reason that this create model operation stopped,
   * e.g. `BUDGET_REACHED`, `MODEL_CONVERGED`.
   * </pre>
   *
   * <code>string stop_reason = 5;</code>
   *
   * @return The bytes for stopReason.
   */
  com.google.protobuf.ByteString getStopReasonBytes();

  /**
   *
   *
   * <pre>
   * Optional. Type of the model. The available values are:
   * *   `cloud` - Model to be used via prediction calls to AutoML API.
   *               This is the default value.
   * *   `mobile-low-latency-1` - A model that, in addition to providing
   *               prediction via AutoML API, can also be exported (see
   *               [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) and used on a mobile or edge device
   *               with TensorFlow afterwards. Expected to have low latency, but
   *               may have lower prediction quality than other models.
   * *   `mobile-versatile-1` - A model that, in addition to providing
   *               prediction via AutoML API, can also be exported (see
   *               [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) and used on a mobile or edge device
   *               with TensorFlow afterwards.
   * *   `mobile-high-accuracy-1` - A model that, in addition to providing
   *               prediction via AutoML API, can also be exported (see
   *               [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) and used on a mobile or edge device
   *               with TensorFlow afterwards.  Expected to have a higher
   *               latency, but should also have a higher prediction quality
   *               than other models.
   * *   `mobile-core-ml-low-latency-1` - A model that, in addition to providing
   *               prediction via AutoML API, can also be exported (see
   *               [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) and used on a mobile device with Core
   *               ML afterwards. Expected to have low latency, but may have
   *               lower prediction quality than other models.
   * *   `mobile-core-ml-versatile-1` - A model that, in addition to providing
   *               prediction via AutoML API, can also be exported (see
   *               [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) and used on a mobile device with Core
   *               ML afterwards.
   * *   `mobile-core-ml-high-accuracy-1` - A model that, in addition to
   *               providing prediction via AutoML API, can also be exported
   *               (see [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) and used on a mobile device with
   *               Core ML afterwards.  Expected to have a higher latency, but
   *               should also have a higher prediction quality than other
   *               models.
   * </pre>
   *
   * <code>string model_type = 7;</code>
   *
   * @return The modelType.
   */
  java.lang.String getModelType();

  /**
   *
   *
   * <pre>
   * Optional. Type of the model. The available values are:
   * *   `cloud` - Model to be used via prediction calls to AutoML API.
   *               This is the default value.
   * *   `mobile-low-latency-1` - A model that, in addition to providing
   *               prediction via AutoML API, can also be exported (see
   *               [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) and used on a mobile or edge device
   *               with TensorFlow afterwards. Expected to have low latency, but
   *               may have lower prediction quality than other models.
   * *   `mobile-versatile-1` - A model that, in addition to providing
   *               prediction via AutoML API, can also be exported (see
   *               [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) and used on a mobile or edge device
   *               with TensorFlow afterwards.
   * *   `mobile-high-accuracy-1` - A model that, in addition to providing
   *               prediction via AutoML API, can also be exported (see
   *               [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) and used on a mobile or edge device
   *               with TensorFlow afterwards.  Expected to have a higher
   *               latency, but should also have a higher prediction quality
   *               than other models.
   * *   `mobile-core-ml-low-latency-1` - A model that, in addition to providing
   *               prediction via AutoML API, can also be exported (see
   *               [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) and used on a mobile device with Core
   *               ML afterwards. Expected to have low latency, but may have
   *               lower prediction quality than other models.
   * *   `mobile-core-ml-versatile-1` - A model that, in addition to providing
   *               prediction via AutoML API, can also be exported (see
   *               [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) and used on a mobile device with Core
   *               ML afterwards.
   * *   `mobile-core-ml-high-accuracy-1` - A model that, in addition to
   *               providing prediction via AutoML API, can also be exported
   *               (see [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) and used on a mobile device with
   *               Core ML afterwards.  Expected to have a higher latency, but
   *               should also have a higher prediction quality than other
   *               models.
   * </pre>
   *
   * <code>string model_type = 7;</code>
   *
   * @return The bytes for modelType.
   */
  com.google.protobuf.ByteString getModelTypeBytes();

  /**
   *
   *
   * <pre>
   * Output only. An approximate number of online prediction QPS that can
   * be supported by this model per each node on which it is deployed.
   * </pre>
   *
   * <code>double node_qps = 13;</code>
   *
   * @return The nodeQps.
   */
  double getNodeQps();

  /**
   *
   *
   * <pre>
   * Output only. The number of nodes this model is deployed on. A node is an
   * abstraction of a machine resource, which can handle online prediction QPS
   * as given in the node_qps field.
   * </pre>
   *
   * <code>int64 node_count = 14;</code>
   *
   * @return The nodeCount.
   */
  long getNodeCount();
}
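
// ---------------------------------------------------------------------------
// Illustrative usage sketch (an editorial addition, not part of the generated
// protobuf output): ImageClassificationModelMetadata and its Builder both
// implement the read-only interface declared above, so either can be passed
// where an ImageClassificationModelMetadataOrBuilder is expected. The setters
// used below (setBaseModelId, setTrainBudget, setModelType) are the standard
// methods protoc generates for these scalar fields; the class name and field
// values are hypothetical examples.
final class ImageClassificationModelMetadataUsageSketch {

  static void printSummary(ImageClassificationModelMetadataOrBuilder metadataOrBuilder) {
    // Read the request-time fields declared in this interface.
    System.out.println("base_model_id: " + metadataOrBuilder.getBaseModelId());
    System.out.println("train_budget:  " + metadataOrBuilder.getTrainBudget() + " hour(s)");
    System.out.println("model_type:    " + metadataOrBuilder.getModelType());
    // Output-only fields (train_cost, stop_reason, node_qps, node_count) are
    // populated by the service; on a locally built message they hold defaults.
    System.out.println("stop_reason:   " + metadataOrBuilder.getStopReason());
  }

  public static void main(String[] args) {
    ImageClassificationModelMetadata metadata =
        ImageClassificationModelMetadata.newBuilder()
            .setBaseModelId("existing-base-model-id") // hypothetical base model in the same project/location
            .setTrainBudget(24L) // requested budget, in hours
            .setModelType("cloud") // the documented default model type
            .build();

    // A built message satisfies the OrBuilder view directly.
    printSummary(metadata);
  }
}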