// com.clarifai.grpc.api.AutoscaleConfigOrBuilder — Maven / Gradle / Ivy artifact page header (scraping residue)
// "The newest version!" — repository page banner, not part of the source
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: proto/clarifai/api/resources.proto
package com.clarifai.grpc.api;
public interface AutoscaleConfigOrBuilder extends
// @@protoc_insertion_point(interface_extends:clarifai.api.AutoscaleConfig)
com.google.protobuf.MessageOrBuilder {
/**
 * <pre>
 * The minimum number of replicas for the runner to have.
 * Defaults to 0 which means autoscaling can scale down to zero.
 * If you want a replica always up then set to &gt;= 1.
 * </pre>
 *
 * <code>uint32 min_replicas = 1;</code>
 * @return The minReplicas.
 */
int getMinReplicas();
/**
 * <pre>
 * The maximum number of replicas to scale up the runner to.
 * </pre>
 *
 * <code>uint32 max_replicas = 2;</code>
 * @return The maxReplicas.
 */
int getMaxReplicas();
/**
 * <pre>
 * The number of seconds of traffic history to consider when autoscaling.
 * </pre>
 *
 * <code>uint32 traffic_history_seconds = 3;</code>
 * @return The trafficHistorySeconds.
 */
int getTrafficHistorySeconds();
/**
 * <pre>
 * The time to wait before scaling down after the last request.
 * </pre>
 *
 * <code>uint32 scale_down_delay_seconds = 4;</code>
 * @return The scaleDownDelaySeconds.
 */
int getScaleDownDelaySeconds();
/**
 * <pre>
 * The time to wait between scaling up replicas without burst traffic.
 * </pre>
 *
 * <code>uint32 scale_up_delay_seconds = 5;</code>
 * @return The scaleUpDelaySeconds.
 */
int getScaleUpDelaySeconds();
/**
 * <pre>
 * Depending on your plan you may be able to enable packing of resources into a single node
 * for more compute and cost efficiency.
 * </pre>
 *
 * <code>bool enable_packing = 6;</code>
 * @return The enablePacking.
 */
boolean getEnablePacking();
}