
com.clarifai.grpc.api.DeploymentOrBuilder

// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: proto/clarifai/api/resources.proto

package com.clarifai.grpc.api;

public interface DeploymentOrBuilder extends
    // @@protoc_insertion_point(interface_extends:clarifai.api.Deployment)
    com.google.protobuf.MessageOrBuilder {

  /**
   * An id for this configured deployment.
   *
   * <code>string id = 1;</code>
   * @return The id.
   */
  java.lang.String getId();

  /**
   * An id for this configured deployment.
   *
   * <code>string id = 1;</code>
   * @return The bytes for id.
   */
  com.google.protobuf.ByteString getIdBytes();

  /**
   * The user who owns the deployment. These live in the user/org account.
   *
   * <code>string user_id = 2;</code>
   * @return The userId.
   */
  java.lang.String getUserId();

  /**
   * The user who owns the deployment. These live in the user/org account.
   *
   * <code>string user_id = 2;</code>
   * @return The bytes for userId.
   */
  com.google.protobuf.ByteString getUserIdBytes();

  /**
   * How to autoscale the object.
   *
   * <code>.clarifai.api.AutoscaleConfig autoscale_config = 3;</code>
   * @return Whether the autoscaleConfig field is set.
   */
  boolean hasAutoscaleConfig();

  /**
   * How to autoscale the object.
   *
   * <code>.clarifai.api.AutoscaleConfig autoscale_config = 3;</code>
   * @return The autoscaleConfig.
   */
  com.clarifai.grpc.api.AutoscaleConfig getAutoscaleConfig();

  /**
   * How to autoscale the object.
   *
   * <code>.clarifai.api.AutoscaleConfig autoscale_config = 3;</code>
   */
  com.clarifai.grpc.api.AutoscaleConfigOrBuilder getAutoscaleConfigOrBuilder();

  /**
   * You can configure different autoscaling per nodepool(s).
   * These nodepools have to be also owned by the same user_id/org as this deployment.
   * If there is more than one nodepool we use the model's ComputeInfo to match
   * with what the nodepool provides to decide which one can handle it combined with the
   * NodepoolRank below. Note: even within a single nodepool if it is heterogeneous then
   * we need a way to rank scheduling choices when we don't know how to decide (like a model
   * supports
   *
   * <code>repeated .clarifai.api.Nodepool nodepools = 4;</code>
   */
  java.util.List<com.clarifai.grpc.api.Nodepool> getNodepoolsList();

  /**
   * You can configure different autoscaling per nodepool(s).
   * These nodepools have to be also owned by the same user_id/org as this deployment.
   * If there is more than one nodepool we use the model's ComputeInfo to match
   * with what the nodepool provides to decide which one can handle it combined with the
   * NodepoolRank below. Note: even within a single nodepool if it is heterogeneous then
   * we need a way to rank scheduling choices when we don't know how to decide (like a model
   * supports
   *
   * <code>repeated .clarifai.api.Nodepool nodepools = 4;</code>
   */
  com.clarifai.grpc.api.Nodepool getNodepools(int index);

  /**
   * You can configure different autoscaling per nodepool(s).
   * These nodepools have to be also owned by the same user_id/org as this deployment.
   * If there is more than one nodepool we use the model's ComputeInfo to match
   * with what the nodepool provides to decide which one can handle it combined with the
   * NodepoolRank below. Note: even within a single nodepool if it is heterogeneous then
   * we need a way to rank scheduling choices when we don't know how to decide (like a model
   * supports
   *
   * <code>repeated .clarifai.api.Nodepool nodepools = 4;</code>
   */
  int getNodepoolsCount();

  /**
   * You can configure different autoscaling per nodepool(s).
   * These nodepools have to be also owned by the same user_id/org as this deployment.
   * If there is more than one nodepool we use the model's ComputeInfo to match
   * with what the nodepool provides to decide which one can handle it combined with the
   * NodepoolRank below. Note: even within a single nodepool if it is heterogeneous then
   * we need a way to rank scheduling choices when we don't know how to decide (like a model
   * supports
   *
   * <code>repeated .clarifai.api.Nodepool nodepools = 4;</code>
   */
  java.util.List<? extends com.clarifai.grpc.api.NodepoolOrBuilder> getNodepoolsOrBuilderList();

  /**
   * You can configure different autoscaling per nodepool(s).
   * These nodepools have to be also owned by the same user_id/org as this deployment.
   * If there is more than one nodepool we use the model's ComputeInfo to match
   * with what the nodepool provides to decide which one can handle it combined with the
   * NodepoolRank below. Note: even within a single nodepool if it is heterogeneous then
   * we need a way to rank scheduling choices when we don't know how to decide (like a model
   * supports
   *
   * <code>repeated .clarifai.api.Nodepool nodepools = 4;</code>
   */
  com.clarifai.grpc.api.NodepoolOrBuilder getNodepoolsOrBuilder(int index);

  /**
   * Model
   *
   * <code>.clarifai.api.Model model = 5;</code>
   * @return Whether the model field is set.
   */
  boolean hasModel();

  /**
   * Model
   *
   * <code>.clarifai.api.Model model = 5;</code>
   * @return The model.
   */
  com.clarifai.grpc.api.Model getModel();

  /**
   * Model
   *
   * <code>.clarifai.api.Model model = 5;</code>
   */
  com.clarifai.grpc.api.ModelOrBuilder getModelOrBuilder();

  /**
   * Workflow
   *
   * <code>.clarifai.api.Workflow workflow = 6;</code>
   * @return Whether the workflow field is set.
   */
  boolean hasWorkflow();

  /**
   * Workflow
   *
   * <code>.clarifai.api.Workflow workflow = 6;</code>
   * @return The workflow.
   */
  com.clarifai.grpc.api.Workflow getWorkflow();

  /**
   * Workflow
   *
   * <code>.clarifai.api.Workflow workflow = 6;</code>
   */
  com.clarifai.grpc.api.WorkflowOrBuilder getWorkflowOrBuilder();

  /**
   * <code>.clarifai.api.Deployment.SchedulingChoice scheduling_choice = 7;</code>
   * @return The enum numeric value on the wire for schedulingChoice.
   */
  int getSchedulingChoiceValue();

  /**
   * <code>.clarifai.api.Deployment.SchedulingChoice scheduling_choice = 7;</code>
   * @return The schedulingChoice.
   */
  com.clarifai.grpc.api.Deployment.SchedulingChoice getSchedulingChoice();

  public com.clarifai.grpc.api.Deployment.ObjectCase getObjectCase();
}
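
DeploymentOrBuilder is the read-only accessor view that both the immutable Deployment message and Deployment.Builder implement, so code that only needs to read a deployment can accept either one. Below is a minimal usage sketch. The builder setter/adder names (setId, setUserId, addNodepools, setModel), the id fields on Nodepool and Model, and the ObjectCase constant names are assumed from the usual protoc-java conventions for the fields declared above rather than taken from this file.

import com.clarifai.grpc.api.Deployment;
import com.clarifai.grpc.api.DeploymentOrBuilder;
import com.clarifai.grpc.api.Model;
import com.clarifai.grpc.api.Nodepool;

public class DeploymentExample {

  public static void main(String[] args) {
    // Build a Deployment; setter names follow the standard protoc-java
    // mapping of the proto fields shown above (assumed, not verified here).
    Deployment deployment = Deployment.newBuilder()
        .setId("my-deployment")                                    // string id = 1
        .setUserId("my-user-or-org")                               // string user_id = 2
        .addNodepools(Nodepool.newBuilder().setId("my-nodepool"))  // repeated nodepools = 4
        .setModel(Model.newBuilder().setId("my-model"))            // model = 5, part of the "object" oneof
        .build();

    printSummary(deployment);              // works for the built message...
    printSummary(deployment.toBuilder());  // ...and for a builder, via the shared interface
  }

  static void printSummary(DeploymentOrBuilder d) {
    System.out.println("id: " + d.getId() + ", owner: " + d.getUserId());
    System.out.println("nodepools: " + d.getNodepoolsCount());

    // model (5) and workflow (6) live in the "object" oneof, so check which
    // case is set before reading; the constant names assume the usual
    // UPPER_SNAKE_CASE mapping of the field names.
    switch (d.getObjectCase()) {
      case MODEL:
        System.out.println("deploys model: " + d.getModel().getId());
        break;
      case WORKFLOW:
        System.out.println("deploys workflow: " + d.getWorkflow().getId());
        break;
      default:
        System.out.println("no model or workflow set");
        break;
    }
  }
}

Accepting DeploymentOrBuilder instead of Deployment lets read-only helpers like printSummary work both during construction and after build(), which is the main reason protoc emits these *OrBuilder interfaces.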



