// tensorflow.serving.SessionBundleConfigOuterClass Maven / Gradle / Ivy
// The newest version!
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: tensorflow_serving/servables/tensorflow/session_bundle_config.proto
package tensorflow.serving;
public final class SessionBundleConfigOuterClass {
// Private constructor: this outer class is only a namespace for the generated
// message types and is never instantiated.
private SessionBundleConfigOuterClass() {}
/**
 * Registers all proto extensions defined in this file with the given lite
 * registry. This file declares no extensions, so the method is a no-op.
 */
public static void registerAllExtensions(
com.google.protobuf.ExtensionRegistryLite registry) {
}
/**
 * Full-registry overload: delegates to the lite-registry variant (which is a
 * no-op here, since this file defines no extensions).
 */
public static void registerAllExtensions(
com.google.protobuf.ExtensionRegistry registry) {
registerAllExtensions(
(com.google.protobuf.ExtensionRegistryLite) registry);
}
public interface SessionBundleConfigOrBuilder extends
// @@protoc_insertion_point(interface_extends:tensorflow.serving.SessionBundleConfig)
com.google.protobuf.MessageOrBuilder {
/**
*
* The TensorFlow runtime to connect to.
* See full documentation in tensorflow/core/public/session_options.h.
* For single machine serving, we recommend using the empty string "", which
* will configure the local TensorFlow runtime implementation. This provides
* the best isolation currently available across multiple Session servables.
*
*
* string session_target = 1;
*/
java.lang.String getSessionTarget();
/**
*
* The TensorFlow runtime to connect to.
* See full documentation in tensorflow/core/public/session_options.h.
* For single machine serving, we recommend using the empty string "", which
* will configure the local TensorFlow runtime implementation. This provides
* the best isolation currently available across multiple Session servables.
*
*
* string session_target = 1;
*/
com.google.protobuf.ByteString
getSessionTargetBytes();
/**
*
* TensorFlow Session configuration options.
* See details at tensorflow/core/protobuf/config.proto.
*
*
* .tensorflow.ConfigProto session_config = 2;
*/
boolean hasSessionConfig();
/**
*
* TensorFlow Session configuration options.
* See details at tensorflow/core/protobuf/config.proto.
*
*
* .tensorflow.ConfigProto session_config = 2;
*/
org.tensorflow.framework.ConfigProto getSessionConfig();
/**
*
* TensorFlow Session configuration options.
* See details at tensorflow/core/protobuf/config.proto.
*
*
* .tensorflow.ConfigProto session_config = 2;
*/
org.tensorflow.framework.ConfigProtoOrBuilder getSessionConfigOrBuilder();
/**
*
* If set, each emitted session is wrapped with a layer that schedules Run()
* calls in batches. The batching layer is transparent to the client
* (implements the tensorflow::Session API).
* IMPORTANT: With batching enabled, client threads will spend most of their
* time blocked on Session::Run() calls, waiting for enough peer threads to
* also call Session::Run() such that a large batch can be formed. For good
* throughput, we recommend setting the number of client threads equal to
* roughly twice the maximum batch size ('max_batch_size' below).
* The batching layer uses a SharedBatchScheduler to coordinate batching
* across multiple session servables emitted by this source adapter. A
* BatchSchedulerRetrier is added on top of each batching session.
*
*
* .tensorflow.serving.BatchingParameters batching_parameters = 3;
*/
boolean hasBatchingParameters();
/**
*
* If set, each emitted session is wrapped with a layer that schedules Run()
* calls in batches. The batching layer is transparent to the client
* (implements the tensorflow::Session API).
* IMPORTANT: With batching enabled, client threads will spend most of their
* time blocked on Session::Run() calls, waiting for enough peer threads to
* also call Session::Run() such that a large batch can be formed. For good
* throughput, we recommend setting the number of client threads equal to
* roughly twice the maximum batch size ('max_batch_size' below).
* The batching layer uses a SharedBatchScheduler to coordinate batching
* across multiple session servables emitted by this source adapter. A
* BatchSchedulerRetrier is added on top of each batching session.
*
*
* .tensorflow.serving.BatchingParameters batching_parameters = 3;
*/
tensorflow.serving.SessionBundleConfigOuterClass.BatchingParameters getBatchingParameters();
/**
*
* If set, each emitted session is wrapped with a layer that schedules Run()
* calls in batches. The batching layer is transparent to the client
* (implements the tensorflow::Session API).
* IMPORTANT: With batching enabled, client threads will spend most of their
* time blocked on Session::Run() calls, waiting for enough peer threads to
* also call Session::Run() such that a large batch can be formed. For good
* throughput, we recommend setting the number of client threads equal to
* roughly twice the maximum batch size ('max_batch_size' below).
* The batching layer uses a SharedBatchScheduler to coordinate batching
* across multiple session servables emitted by this source adapter. A
* BatchSchedulerRetrier is added on top of each batching session.
*
*
* .tensorflow.serving.BatchingParameters batching_parameters = 3;
*/
tensorflow.serving.SessionBundleConfigOuterClass.BatchingParametersOrBuilder getBatchingParametersOrBuilder();
/**
*
* If set, session run calls use a separate threadpool for restore and init
* ops as part of loading the session-bundle. The value of this field should
* correspond to the index of the tensorflow::ThreadPoolOptionProto defined as
* part of `session_config.session_inter_op_thread_pool`.
*
*
* .google.protobuf.Int32Value session_run_load_threadpool_index = 4;
*/
boolean hasSessionRunLoadThreadpoolIndex();
/**
*
* If set, session run calls use a separate threadpool for restore and init
* ops as part of loading the session-bundle. The value of this field should
* correspond to the index of the tensorflow::ThreadPoolOptionProto defined as
* part of `session_config.session_inter_op_thread_pool`.
*
*
* .google.protobuf.Int32Value session_run_load_threadpool_index = 4;
*/
com.google.protobuf.Int32Value getSessionRunLoadThreadpoolIndex();
/**
*
* If set, session run calls use a separate threadpool for restore and init
* ops as part of loading the session-bundle. The value of this field should
* correspond to the index of the tensorflow::ThreadPoolOptionProto defined as
* part of `session_config.session_inter_op_thread_pool`.
*
*
* .google.protobuf.Int32Value session_run_load_threadpool_index = 4;
*/
com.google.protobuf.Int32ValueOrBuilder getSessionRunLoadThreadpoolIndexOrBuilder();
/**
*
* EXPERIMENTAL. THIS FIELD MAY CHANGE OR GO AWAY. USE WITH CAUTION.
* Transient memory used while loading a model, which is released once the
* loading phase has completed. (This is on top of the memory used in steady-
* state while the model is in memory after it has finished loading.)
* TODO(b/38376838): This is a temporary hack, and it applies to all models.
* Remove it once resource estimates are moved inside SavedModel.
*
*
* uint64 experimental_transient_ram_bytes_during_load = 5;
*/
long getExperimentalTransientRamBytesDuringLoad();
/**
*
* Set of SavedModel tags identifying the specific meta graph def to be
* loaded.
*
*
* repeated string saved_model_tags = 6;
*/
java.util.List
getSavedModelTagsList();
/**
*
* Set of SavedModel tags identifying the specific meta graph def to be
* loaded.
*
*
* repeated string saved_model_tags = 6;
*/
int getSavedModelTagsCount();
/**
*
* Set of SavedModel tags identifying the specific meta graph def to be
* loaded.
*
*
* repeated string saved_model_tags = 6;
*/
java.lang.String getSavedModelTags(int index);
/**
*
* Set of SavedModel tags identifying the specific meta graph def to be
* loaded.
*
*
* repeated string saved_model_tags = 6;
*/
com.google.protobuf.ByteString
getSavedModelTagsBytes(int index);
/**
*
* EXPERIMENTAL. THIS FIELD MAY CHANGE OR GO AWAY. USE WITH CAUTION.
* Input tensors to append to every Session::Run() call.
*
*
* repeated .tensorflow.NamedTensorProto experimental_fixed_input_tensors = 778;
*/
java.util.List
getExperimentalFixedInputTensorsList();
/**
*
* EXPERIMENTAL. THIS FIELD MAY CHANGE OR GO AWAY. USE WITH CAUTION.
* Input tensors to append to every Session::Run() call.
*
*
* repeated .tensorflow.NamedTensorProto experimental_fixed_input_tensors = 778;
*/
org.tensorflow.framework.NamedTensorProto getExperimentalFixedInputTensors(int index);
/**
*
* EXPERIMENTAL. THIS FIELD MAY CHANGE OR GO AWAY. USE WITH CAUTION.
* Input tensors to append to every Session::Run() call.
*
*
* repeated .tensorflow.NamedTensorProto experimental_fixed_input_tensors = 778;
*/
int getExperimentalFixedInputTensorsCount();
/**
*
* EXPERIMENTAL. THIS FIELD MAY CHANGE OR GO AWAY. USE WITH CAUTION.
* Input tensors to append to every Session::Run() call.
*
*
* repeated .tensorflow.NamedTensorProto experimental_fixed_input_tensors = 778;
*/
java.util.List extends org.tensorflow.framework.NamedTensorProtoOrBuilder>
getExperimentalFixedInputTensorsOrBuilderList();
/**
*
* EXPERIMENTAL. THIS FIELD MAY CHANGE OR GO AWAY. USE WITH CAUTION.
* Input tensors to append to every Session::Run() call.
*
*
* repeated .tensorflow.NamedTensorProto experimental_fixed_input_tensors = 778;
*/
org.tensorflow.framework.NamedTensorProtoOrBuilder getExperimentalFixedInputTensorsOrBuilder(
int index);
/**
*
* Enables model warmup.
*
*
* bool enable_model_warmup = 779;
*/
boolean getEnableModelWarmup();
}
/**
*
* Configuration parameters for a SessionBundle, with optional batching.
*
*
* Protobuf type {@code tensorflow.serving.SessionBundleConfig}
*/
public static final class SessionBundleConfig extends
com.google.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:tensorflow.serving.SessionBundleConfig)
SessionBundleConfigOrBuilder {
private static final long serialVersionUID = 0L;
// Use SessionBundleConfig.newBuilder() to construct.
/**
 * Builder-based constructor used by the generated {@code Builder}; the
 * superclass copies builder state (including unknown fields).
 * Fix: the extraction-mangled raw {@code Builder>} is restored to the
 * generated wildcard form {@code Builder<?>}, which is required to compile.
 */
private SessionBundleConfig(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
  super(builder);
}
// No-arg constructor: initializes every field to its proto3 default
// (empty string, 0, empty lists, false). Message fields stay null until set.
private SessionBundleConfig() {
sessionTarget_ = "";
experimentalTransientRamBytesDuringLoad_ = 0L;
savedModelTags_ = com.google.protobuf.LazyStringArrayList.EMPTY;
experimentalFixedInputTensors_ = java.util.Collections.emptyList();
enableModelWarmup_ = false;
}
// Exposes fields that were present on the wire but are unknown to this
// generated schema; they are preserved for round-tripping.
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
/**
 * Wire-format parsing constructor: reads tag/value pairs from {@code input}
 * until end of stream, populating this message's fields in place.
 * Tag constants are {@code (field_number << 3) | wire_type}; unrecognized
 * tags are preserved in {@code unknownFields}.
 * Fix: restores the stripped element type on the repeated-field ArrayList
 * (was a raw {@code new java.util.ArrayList()}).
 *
 * @throws com.google.protobuf.InvalidProtocolBufferException on malformed input;
 *         the partially-parsed message is attached as the unfinished message.
 */
private SessionBundleConfig(
    com.google.protobuf.CodedInputStream input,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws com.google.protobuf.InvalidProtocolBufferException {
  this();
  if (extensionRegistry == null) {
    throw new java.lang.NullPointerException();
  }
  int mutable_bitField0_ = 0;
  com.google.protobuf.UnknownFieldSet.Builder unknownFields =
      com.google.protobuf.UnknownFieldSet.newBuilder();
  try {
    boolean done = false;
    while (!done) {
      int tag = input.readTag();
      switch (tag) {
        case 0:  // end of stream
          done = true;
          break;
        case 10: {  // session_target = 1, length-delimited
          java.lang.String s = input.readStringRequireUtf8();
          sessionTarget_ = s;
          break;
        }
        case 18: {  // session_config = 2, message; merge into any prior value
          org.tensorflow.framework.ConfigProto.Builder subBuilder = null;
          if (sessionConfig_ != null) {
            subBuilder = sessionConfig_.toBuilder();
          }
          sessionConfig_ = input.readMessage(org.tensorflow.framework.ConfigProto.parser(), extensionRegistry);
          if (subBuilder != null) {
            subBuilder.mergeFrom(sessionConfig_);
            sessionConfig_ = subBuilder.buildPartial();
          }
          break;
        }
        case 26: {  // batching_parameters = 3, message; merge into any prior value
          tensorflow.serving.SessionBundleConfigOuterClass.BatchingParameters.Builder subBuilder = null;
          if (batchingParameters_ != null) {
            subBuilder = batchingParameters_.toBuilder();
          }
          batchingParameters_ = input.readMessage(tensorflow.serving.SessionBundleConfigOuterClass.BatchingParameters.parser(), extensionRegistry);
          if (subBuilder != null) {
            subBuilder.mergeFrom(batchingParameters_);
            batchingParameters_ = subBuilder.buildPartial();
          }
          break;
        }
        case 34: {  // session_run_load_threadpool_index = 4, message; merge
          com.google.protobuf.Int32Value.Builder subBuilder = null;
          if (sessionRunLoadThreadpoolIndex_ != null) {
            subBuilder = sessionRunLoadThreadpoolIndex_.toBuilder();
          }
          sessionRunLoadThreadpoolIndex_ = input.readMessage(com.google.protobuf.Int32Value.parser(), extensionRegistry);
          if (subBuilder != null) {
            subBuilder.mergeFrom(sessionRunLoadThreadpoolIndex_);
            sessionRunLoadThreadpoolIndex_ = subBuilder.buildPartial();
          }
          break;
        }
        case 40: {  // experimental_transient_ram_bytes_during_load = 5, varint
          experimentalTransientRamBytesDuringLoad_ = input.readUInt64();
          break;
        }
        case 50: {  // saved_model_tags = 6, repeated string
          java.lang.String s = input.readStringRequireUtf8();
          // Lazily allocate a mutable list on first entry (bit 0x20 tracks it).
          if (!((mutable_bitField0_ & 0x00000020) == 0x00000020)) {
            savedModelTags_ = new com.google.protobuf.LazyStringArrayList();
            mutable_bitField0_ |= 0x00000020;
          }
          savedModelTags_.add(s);
          break;
        }
        case 6226: {  // experimental_fixed_input_tensors = 778 (778<<3 | 2)
          // Lazily allocate a mutable list on first entry (bit 0x40 tracks it).
          if (!((mutable_bitField0_ & 0x00000040) == 0x00000040)) {
            experimentalFixedInputTensors_ = new java.util.ArrayList<org.tensorflow.framework.NamedTensorProto>();
            mutable_bitField0_ |= 0x00000040;
          }
          experimentalFixedInputTensors_.add(
              input.readMessage(org.tensorflow.framework.NamedTensorProto.parser(), extensionRegistry));
          break;
        }
        case 6232: {  // enable_model_warmup = 779 (779<<3 | 0), varint
          enableModelWarmup_ = input.readBool();
          break;
        }
        default: {  // unknown field: preserve, or stop at an end-group tag
          if (!parseUnknownFieldProto3(
              input, unknownFields, extensionRegistry, tag)) {
            done = true;
          }
          break;
        }
      }
    }
  } catch (com.google.protobuf.InvalidProtocolBufferException e) {
    throw e.setUnfinishedMessage(this);
  } catch (java.io.IOException e) {
    throw new com.google.protobuf.InvalidProtocolBufferException(
        e).setUnfinishedMessage(this);
  } finally {
    // Seal any lists we allocated so the parsed message is immutable.
    if (((mutable_bitField0_ & 0x00000020) == 0x00000020)) {
      savedModelTags_ = savedModelTags_.getUnmodifiableView();
    }
    if (((mutable_bitField0_ & 0x00000040) == 0x00000040)) {
      experimentalFixedInputTensors_ = java.util.Collections.unmodifiableList(experimentalFixedInputTensors_);
    }
    this.unknownFields = unknownFields.build();
    makeExtensionsImmutable();
  }
}
// Returns the message descriptor, which is initialized elsewhere in this
// outer class from the embedded FileDescriptor.
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return tensorflow.serving.SessionBundleConfigOuterClass.internal_static_tensorflow_serving_SessionBundleConfig_descriptor;
}
// Maps descriptor fields to the reflective accessors of this class and its
// Builder; used by the GeneratedMessageV3 reflection machinery.
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return tensorflow.serving.SessionBundleConfigOuterClass.internal_static_tensorflow_serving_SessionBundleConfig_fieldAccessorTable
.ensureFieldAccessorsInitialized(
tensorflow.serving.SessionBundleConfigOuterClass.SessionBundleConfig.class, tensorflow.serving.SessionBundleConfigOuterClass.SessionBundleConfig.Builder.class);
}
private int bitField0_;
public static final int SESSION_TARGET_FIELD_NUMBER = 1;
// Holds either a String or a ByteString; converted lazily in each direction.
private volatile java.lang.Object sessionTarget_;
/**
 * Returns {@code session_target} (field 1): the TensorFlow runtime to connect
 * to; "" selects the in-process runtime. Caches the decoded String.
 *
 * <code>string session_target = 1;</code>
 */
public java.lang.String getSessionTarget() {
  java.lang.Object raw = sessionTarget_;
  if (raw instanceof com.google.protobuf.ByteString) {
    java.lang.String decoded = ((com.google.protobuf.ByteString) raw).toStringUtf8();
    sessionTarget_ = decoded;
    return decoded;
  }
  return (java.lang.String) raw;
}
/**
 * Returns {@code session_target} as UTF-8 bytes, caching the encoded form.
 *
 * <code>string session_target = 1;</code>
 */
public com.google.protobuf.ByteString
    getSessionTargetBytes() {
  java.lang.Object raw = sessionTarget_;
  if (!(raw instanceof java.lang.String)) {
    return (com.google.protobuf.ByteString) raw;
  }
  com.google.protobuf.ByteString encoded =
      com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) raw);
  sessionTarget_ = encoded;
  return encoded;
}
public static final int SESSION_CONFIG_FIELD_NUMBER = 2;
private org.tensorflow.framework.ConfigProto sessionConfig_;
/**
 * Whether {@code session_config} (field 2) was explicitly set.
 *
 * <code>.tensorflow.ConfigProto session_config = 2;</code>
 */
public boolean hasSessionConfig() {
  return null != sessionConfig_;
}
/**
 * Returns {@code session_config}, or its default instance when unset.
 *
 * <code>.tensorflow.ConfigProto session_config = 2;</code>
 */
public org.tensorflow.framework.ConfigProto getSessionConfig() {
  org.tensorflow.framework.ConfigProto value = sessionConfig_;
  if (value == null) {
    return org.tensorflow.framework.ConfigProto.getDefaultInstance();
  }
  return value;
}
/**
 * Read-only builder view of {@code session_config}.
 *
 * <code>.tensorflow.ConfigProto session_config = 2;</code>
 */
public org.tensorflow.framework.ConfigProtoOrBuilder getSessionConfigOrBuilder() {
  return getSessionConfig();
}
public static final int BATCHING_PARAMETERS_FIELD_NUMBER = 3;
private tensorflow.serving.SessionBundleConfigOuterClass.BatchingParameters batchingParameters_;
/**
 * Whether {@code batching_parameters} (field 3) was explicitly set.
 * When set, each emitted session is wrapped with a transparent batching layer;
 * see the .proto for threading guidance.
 *
 * <code>.tensorflow.serving.BatchingParameters batching_parameters = 3;</code>
 */
public boolean hasBatchingParameters() {
  return null != batchingParameters_;
}
/**
 * Returns {@code batching_parameters}, or its default instance when unset.
 *
 * <code>.tensorflow.serving.BatchingParameters batching_parameters = 3;</code>
 */
public tensorflow.serving.SessionBundleConfigOuterClass.BatchingParameters getBatchingParameters() {
  tensorflow.serving.SessionBundleConfigOuterClass.BatchingParameters value = batchingParameters_;
  if (value == null) {
    return tensorflow.serving.SessionBundleConfigOuterClass.BatchingParameters.getDefaultInstance();
  }
  return value;
}
/**
 * Read-only builder view of {@code batching_parameters}.
 *
 * <code>.tensorflow.serving.BatchingParameters batching_parameters = 3;</code>
 */
public tensorflow.serving.SessionBundleConfigOuterClass.BatchingParametersOrBuilder getBatchingParametersOrBuilder() {
  return getBatchingParameters();
}
public static final int SESSION_RUN_LOAD_THREADPOOL_INDEX_FIELD_NUMBER = 4;
private com.google.protobuf.Int32Value sessionRunLoadThreadpoolIndex_;
/**
 * Whether {@code session_run_load_threadpool_index} (field 4) was set.
 * When set, restore/init ops at load time run on the inter-op threadpool with
 * this index (see `session_config.session_inter_op_thread_pool`).
 *
 * <code>.google.protobuf.Int32Value session_run_load_threadpool_index = 4;</code>
 */
public boolean hasSessionRunLoadThreadpoolIndex() {
  return null != sessionRunLoadThreadpoolIndex_;
}
/**
 * Returns the threadpool-index wrapper, or its default instance when unset.
 *
 * <code>.google.protobuf.Int32Value session_run_load_threadpool_index = 4;</code>
 */
public com.google.protobuf.Int32Value getSessionRunLoadThreadpoolIndex() {
  com.google.protobuf.Int32Value value = sessionRunLoadThreadpoolIndex_;
  if (value == null) {
    return com.google.protobuf.Int32Value.getDefaultInstance();
  }
  return value;
}
/**
 * Read-only builder view of {@code session_run_load_threadpool_index}.
 *
 * <code>.google.protobuf.Int32Value session_run_load_threadpool_index = 4;</code>
 */
public com.google.protobuf.Int32ValueOrBuilder getSessionRunLoadThreadpoolIndexOrBuilder() {
  return getSessionRunLoadThreadpoolIndex();
}
public static final int EXPERIMENTAL_TRANSIENT_RAM_BYTES_DURING_LOAD_FIELD_NUMBER = 5;
// uint64 on the wire; stored as a Java long (callers treat it as unsigned).
private long experimentalTransientRamBytesDuringLoad_;
/**
 * EXPERIMENTAL. THIS FIELD MAY CHANGE OR GO AWAY. USE WITH CAUTION.
 * Transient memory used while loading a model, released once loading
 * completes (on top of steady-state memory). See TODO(b/38376838) in the
 * .proto: temporary hack applying to all models.
 *
 * <code>uint64 experimental_transient_ram_bytes_during_load = 5;</code>
 */
public long getExperimentalTransientRamBytesDuringLoad() {
return experimentalTransientRamBytesDuringLoad_;
}
public static final int SAVED_MODEL_TAGS_FIELD_NUMBER = 6;
// Immutable after parsing (see the parsing constructor's finally block).
private com.google.protobuf.LazyStringList savedModelTags_;
/**
 * Set of SavedModel tags identifying the specific meta graph def to load.
 *
 * <code>repeated string saved_model_tags = 6;</code>
 */
public com.google.protobuf.ProtocolStringList
getSavedModelTagsList() {
return savedModelTags_;
}
/**
 * Number of {@code saved_model_tags} entries.
 *
 * <code>repeated string saved_model_tags = 6;</code>
 */
public int getSavedModelTagsCount() {
return savedModelTags_.size();
}
/**
 * The {@code saved_model_tags} entry at {@code index}.
 *
 * <code>repeated string saved_model_tags = 6;</code>
 */
public java.lang.String getSavedModelTags(int index) {
return savedModelTags_.get(index);
}
/**
 * UTF-8 bytes of the {@code saved_model_tags} entry at {@code index}.
 *
 * <code>repeated string saved_model_tags = 6;</code>
 */
public com.google.protobuf.ByteString
getSavedModelTagsBytes(int index) {
return savedModelTags_.getByteString(index);
}
public static final int EXPERIMENTAL_FIXED_INPUT_TENSORS_FIELD_NUMBER = 778;
private java.util.List experimentalFixedInputTensors_;
/**
*
* EXPERIMENTAL. THIS FIELD MAY CHANGE OR GO AWAY. USE WITH CAUTION.
* Input tensors to append to every Session::Run() call.
*
*
* repeated .tensorflow.NamedTensorProto experimental_fixed_input_tensors = 778;
*/
public java.util.List getExperimentalFixedInputTensorsList() {
return experimentalFixedInputTensors_;
}
/**
*
* EXPERIMENTAL. THIS FIELD MAY CHANGE OR GO AWAY. USE WITH CAUTION.
* Input tensors to append to every Session::Run() call.
*
*
* repeated .tensorflow.NamedTensorProto experimental_fixed_input_tensors = 778;
*/
public java.util.List extends org.tensorflow.framework.NamedTensorProtoOrBuilder>
getExperimentalFixedInputTensorsOrBuilderList() {
return experimentalFixedInputTensors_;
}
/**
*
* EXPERIMENTAL. THIS FIELD MAY CHANGE OR GO AWAY. USE WITH CAUTION.
* Input tensors to append to every Session::Run() call.
*
*
* repeated .tensorflow.NamedTensorProto experimental_fixed_input_tensors = 778;
*/
public int getExperimentalFixedInputTensorsCount() {
return experimentalFixedInputTensors_.size();
}
/**
*
* EXPERIMENTAL. THIS FIELD MAY CHANGE OR GO AWAY. USE WITH CAUTION.
* Input tensors to append to every Session::Run() call.
*
*
* repeated .tensorflow.NamedTensorProto experimental_fixed_input_tensors = 778;
*/
public org.tensorflow.framework.NamedTensorProto getExperimentalFixedInputTensors(int index) {
return experimentalFixedInputTensors_.get(index);
}
/**
*
* EXPERIMENTAL. THIS FIELD MAY CHANGE OR GO AWAY. USE WITH CAUTION.
* Input tensors to append to every Session::Run() call.
*
*
* repeated .tensorflow.NamedTensorProto experimental_fixed_input_tensors = 778;
*/
public org.tensorflow.framework.NamedTensorProtoOrBuilder getExperimentalFixedInputTensorsOrBuilder(
int index) {
return experimentalFixedInputTensors_.get(index);
}
public static final int ENABLE_MODEL_WARMUP_FIELD_NUMBER = 779;
private boolean enableModelWarmup_;
/**
 * Enables model warmup.
 *
 * <code>bool enable_model_warmup = 779;</code>
 */
public boolean getEnableModelWarmup() {
return enableModelWarmup_;
}
// Memoized result: -1 = not computed, 0 = false, 1 = true.
private byte memoizedIsInitialized = -1;
/**
 * Proto3 message with no required fields, so it is always initialized;
 * the (trivially true) answer is memoized like other generated messages.
 */
@java.lang.Override
public final boolean isInitialized() {
  switch (memoizedIsInitialized) {
    case 1:
      return true;
    case 0:
      return false;
    default:
      memoizedIsInitialized = 1;
      return true;
  }
}
// Serializes set fields to the wire in ascending field-number order.
// Proto3: scalar fields equal to their default (empty string, 0, false) are
// skipped; message fields are written only when non-null.
@java.lang.Override
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
// Field 1: session_target (skipped when empty).
if (!getSessionTargetBytes().isEmpty()) {
com.google.protobuf.GeneratedMessageV3.writeString(output, 1, sessionTarget_);
}
// Field 2: session_config.
if (sessionConfig_ != null) {
output.writeMessage(2, getSessionConfig());
}
// Field 3: batching_parameters.
if (batchingParameters_ != null) {
output.writeMessage(3, getBatchingParameters());
}
// Field 4: session_run_load_threadpool_index.
if (sessionRunLoadThreadpoolIndex_ != null) {
output.writeMessage(4, getSessionRunLoadThreadpoolIndex());
}
// Field 5: experimental_transient_ram_bytes_during_load (skipped when 0).
if (experimentalTransientRamBytesDuringLoad_ != 0L) {
output.writeUInt64(5, experimentalTransientRamBytesDuringLoad_);
}
// Field 6: saved_model_tags, one record per entry.
for (int i = 0; i < savedModelTags_.size(); i++) {
com.google.protobuf.GeneratedMessageV3.writeString(output, 6, savedModelTags_.getRaw(i));
}
// Field 778: experimental_fixed_input_tensors, one record per entry.
for (int i = 0; i < experimentalFixedInputTensors_.size(); i++) {
output.writeMessage(778, experimentalFixedInputTensors_.get(i));
}
// Field 779: enable_model_warmup (skipped when false).
if (enableModelWarmup_ != false) {
output.writeBool(779, enableModelWarmup_);
}
// Round-trip any fields unknown to this schema.
unknownFields.writeTo(output);
}
// Computes (and memoizes) the serialized byte size; must mirror writeTo's
// skip-default logic exactly so the size matches what is written.
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (!getSessionTargetBytes().isEmpty()) {
size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, sessionTarget_);
}
if (sessionConfig_ != null) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(2, getSessionConfig());
}
if (batchingParameters_ != null) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(3, getBatchingParameters());
}
if (sessionRunLoadThreadpoolIndex_ != null) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(4, getSessionRunLoadThreadpoolIndex());
}
if (experimentalTransientRamBytesDuringLoad_ != 0L) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(5, experimentalTransientRamBytesDuringLoad_);
}
{
// saved_model_tags: payload bytes plus one 1-byte tag per entry
// (field 6 has a single-byte tag).
int dataSize = 0;
for (int i = 0; i < savedModelTags_.size(); i++) {
dataSize += computeStringSizeNoTag(savedModelTags_.getRaw(i));
}
size += dataSize;
size += 1 * getSavedModelTagsList().size();
}
for (int i = 0; i < experimentalFixedInputTensors_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(778, experimentalFixedInputTensors_.get(i));
}
if (enableModelWarmup_ != false) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(779, enableModelWarmup_);
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
/**
 * Field-by-field value equality, including presence of message fields and
 * the retained unknown fields; non-SessionBundleConfig inputs defer to
 * {@code super.equals}.
 */
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
  if (obj == this) {
    return true;
  }
  if (!(obj instanceof tensorflow.serving.SessionBundleConfigOuterClass.SessionBundleConfig)) {
    return super.equals(obj);
  }
  tensorflow.serving.SessionBundleConfigOuterClass.SessionBundleConfig other =
      (tensorflow.serving.SessionBundleConfigOuterClass.SessionBundleConfig) obj;
  if (!getSessionTarget().equals(other.getSessionTarget())) return false;
  if (hasSessionConfig() != other.hasSessionConfig()) return false;
  if (hasSessionConfig()
      && !getSessionConfig().equals(other.getSessionConfig())) return false;
  if (hasBatchingParameters() != other.hasBatchingParameters()) return false;
  if (hasBatchingParameters()
      && !getBatchingParameters().equals(other.getBatchingParameters())) return false;
  if (hasSessionRunLoadThreadpoolIndex() != other.hasSessionRunLoadThreadpoolIndex()) return false;
  if (hasSessionRunLoadThreadpoolIndex()
      && !getSessionRunLoadThreadpoolIndex().equals(other.getSessionRunLoadThreadpoolIndex())) return false;
  if (getExperimentalTransientRamBytesDuringLoad()
      != other.getExperimentalTransientRamBytesDuringLoad()) return false;
  if (!getSavedModelTagsList().equals(other.getSavedModelTagsList())) return false;
  if (!getExperimentalFixedInputTensorsList()
      .equals(other.getExperimentalFixedInputTensorsList())) return false;
  if (getEnableModelWarmup() != other.getEnableModelWarmup()) return false;
  return unknownFields.equals(other.unknownFields);
}
// Memoized hash consistent with equals(): generated 19/37/53 mixing over the
// descriptor and each contributing field; unset message fields and empty
// lists are excluded, matching equals()'s presence checks.
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
hash = (37 * hash) + SESSION_TARGET_FIELD_NUMBER;
hash = (53 * hash) + getSessionTarget().hashCode();
if (hasSessionConfig()) {
hash = (37 * hash) + SESSION_CONFIG_FIELD_NUMBER;
hash = (53 * hash) + getSessionConfig().hashCode();
}
if (hasBatchingParameters()) {
hash = (37 * hash) + BATCHING_PARAMETERS_FIELD_NUMBER;
hash = (53 * hash) + getBatchingParameters().hashCode();
}
if (hasSessionRunLoadThreadpoolIndex()) {
hash = (37 * hash) + SESSION_RUN_LOAD_THREADPOOL_INDEX_FIELD_NUMBER;
hash = (53 * hash) + getSessionRunLoadThreadpoolIndex().hashCode();
}
hash = (37 * hash) + EXPERIMENTAL_TRANSIENT_RAM_BYTES_DURING_LOAD_FIELD_NUMBER;
hash = (53 * hash) + com.google.protobuf.Internal.hashLong(
getExperimentalTransientRamBytesDuringLoad());
if (getSavedModelTagsCount() > 0) {
hash = (37 * hash) + SAVED_MODEL_TAGS_FIELD_NUMBER;
hash = (53 * hash) + getSavedModelTagsList().hashCode();
}
if (getExperimentalFixedInputTensorsCount() > 0) {
hash = (37 * hash) + EXPERIMENTAL_FIXED_INPUT_TENSORS_FIELD_NUMBER;
hash = (53 * hash) + getExperimentalFixedInputTensorsList().hashCode();
}
hash = (37 * hash) + ENABLE_MODEL_WARMUP_FIELD_NUMBER;
hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(
getEnableModelWarmup());
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static tensorflow.serving.SessionBundleConfigOuterClass.SessionBundleConfig parseFrom(
java.nio.ByteBuffer data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static tensorflow.serving.SessionBundleConfigOuterClass.SessionBundleConfig parseFrom(
java.nio.ByteBuffer data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static tensorflow.serving.SessionBundleConfigOuterClass.SessionBundleConfig parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
// Parses from a ByteString, resolving extensions via the given registry.
public static tensorflow.serving.SessionBundleConfigOuterClass.SessionBundleConfig parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
// Parses a message from a byte array; throws on malformed input.
public static tensorflow.serving.SessionBundleConfigOuterClass.SessionBundleConfig parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
// Parses from a byte array, resolving extensions via the given registry.
public static tensorflow.serving.SessionBundleConfigOuterClass.SessionBundleConfig parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
// Parses a single message from the stream, reading it to EOF;
// wire-format errors surface as IOException via parseWithIOException.
public static tensorflow.serving.SessionBundleConfigOuterClass.SessionBundleConfig parseFrom(java.io.InputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
// Stream variant with an extension registry for resolving extensions.
public static tensorflow.serving.SessionBundleConfigOuterClass.SessionBundleConfig parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
// Parses one length-prefixed (varint size + payload) message from the
// stream, leaving the stream positioned after it.
public static tensorflow.serving.SessionBundleConfigOuterClass.SessionBundleConfig parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
// Length-prefixed variant with an extension registry.
public static tensorflow.serving.SessionBundleConfigOuterClass.SessionBundleConfig parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
// Parses from an already-open CodedInputStream.
public static tensorflow.serving.SessionBundleConfigOuterClass.SessionBundleConfig parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
// CodedInputStream variant with an extension registry.
public static tensorflow.serving.SessionBundleConfigOuterClass.SessionBundleConfig parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
// Returns a fresh builder for this message type; delegates to newBuilder().
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
// Creates an empty builder, seeded from the default (all-defaults) instance.
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
// Creates a builder pre-populated with a copy of prototype's fields.
public static Builder newBuilder(tensorflow.serving.SessionBundleConfigOuterClass.SessionBundleConfig prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
// The default instance yields a fresh empty builder; any other instance
// seeds the new builder with its current field values.
if (this == DEFAULT_INSTANCE) {
return new Builder();
}
return new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
// Builder wired to `parent` so nested-builder changes propagate upward.
return new Builder(parent);
}
/**
*
* Configuration parameters for a SessionBundle, with optional batching.
*
*
* Protobuf type {@code tensorflow.serving.SessionBundleConfig}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessageV3.Builder implements
// @@protoc_insertion_point(builder_implements:tensorflow.serving.SessionBundleConfig)
tensorflow.serving.SessionBundleConfigOuterClass.SessionBundleConfigOrBuilder {
// Proto descriptor for tensorflow.serving.SessionBundleConfig (reflection).
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return tensorflow.serving.SessionBundleConfigOuterClass.internal_static_tensorflow_serving_SessionBundleConfig_descriptor;
}
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
// Binds descriptor fields to the generated message and builder classes
// so reflective get/set works.
return tensorflow.serving.SessionBundleConfigOuterClass.internal_static_tensorflow_serving_SessionBundleConfig_fieldAccessorTable
.ensureFieldAccessorsInitialized(
tensorflow.serving.SessionBundleConfigOuterClass.SessionBundleConfig.class, tensorflow.serving.SessionBundleConfigOuterClass.SessionBundleConfig.Builder.class);
}
// Construct using tensorflow.serving.SessionBundleConfigOuterClass.SessionBundleConfig.newBuilder()
// Private: obtain instances via the static newBuilder() factories.
private Builder() {
maybeForceBuilderInitialization();
}
// Builder attached to a parent, used when this message is built as a
// nested field of an enclosing builder.
private Builder(
com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
// When the runtime always uses field builders, eagerly create the
// repeated-message field builder for experimental_fixed_input_tensors.
if (com.google.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
getExperimentalFixedInputTensorsFieldBuilder();
}
}
@java.lang.Override
public Builder clear() {
// Resets every field to its proto3 default and discards any nested
// builders (both the raw value and the builder are dropped).
super.clear();
sessionTarget_ = "";
if (sessionConfigBuilder_ == null) {
sessionConfig_ = null;
} else {
sessionConfig_ = null;
sessionConfigBuilder_ = null;
}
if (batchingParametersBuilder_ == null) {
batchingParameters_ = null;
} else {
batchingParameters_ = null;
batchingParametersBuilder_ = null;
}
if (sessionRunLoadThreadpoolIndexBuilder_ == null) {
sessionRunLoadThreadpoolIndex_ = null;
} else {
sessionRunLoadThreadpoolIndex_ = null;
sessionRunLoadThreadpoolIndexBuilder_ = null;
}
experimentalTransientRamBytesDuringLoad_ = 0L;
// Bit 0x20 marks a mutable saved_model_tags list; reset to the shared
// empty list and clear the bit.
savedModelTags_ = com.google.protobuf.LazyStringArrayList.EMPTY;
bitField0_ = (bitField0_ & ~0x00000020);
// Bit 0x40 plays the same role for experimental_fixed_input_tensors.
if (experimentalFixedInputTensorsBuilder_ == null) {
experimentalFixedInputTensors_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000040);
} else {
experimentalFixedInputTensorsBuilder_.clear();
}
enableModelWarmup_ = false;
return this;
}
@java.lang.Override
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
// Same descriptor as the message class; required by the Builder API.
return tensorflow.serving.SessionBundleConfigOuterClass.internal_static_tensorflow_serving_SessionBundleConfig_descriptor;
}
@java.lang.Override
public tensorflow.serving.SessionBundleConfigOuterClass.SessionBundleConfig getDefaultInstanceForType() {
// Singleton all-defaults instance; never null.
return tensorflow.serving.SessionBundleConfigOuterClass.SessionBundleConfig.getDefaultInstance();
}
@java.lang.Override
public tensorflow.serving.SessionBundleConfigOuterClass.SessionBundleConfig build() {
// Builds the message, rejecting it if isInitialized() reports false.
tensorflow.serving.SessionBundleConfigOuterClass.SessionBundleConfig result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public tensorflow.serving.SessionBundleConfigOuterClass.SessionBundleConfig buildPartial() {
// Builds the message from the builder state without the initialization
// check. Singular message fields come either from the raw field or from
// their nested builder; repeated fields are frozen (made unmodifiable)
// and their mutability bits cleared so builder and message stop sharing
// a mutable list.
tensorflow.serving.SessionBundleConfigOuterClass.SessionBundleConfig result = new tensorflow.serving.SessionBundleConfigOuterClass.SessionBundleConfig(this);
result.sessionTarget_ = sessionTarget_;
if (sessionConfigBuilder_ == null) {
result.sessionConfig_ = sessionConfig_;
} else {
result.sessionConfig_ = sessionConfigBuilder_.build();
}
if (batchingParametersBuilder_ == null) {
result.batchingParameters_ = batchingParameters_;
} else {
result.batchingParameters_ = batchingParametersBuilder_.build();
}
if (sessionRunLoadThreadpoolIndexBuilder_ == null) {
result.sessionRunLoadThreadpoolIndex_ = sessionRunLoadThreadpoolIndex_;
} else {
result.sessionRunLoadThreadpoolIndex_ = sessionRunLoadThreadpoolIndexBuilder_.build();
}
result.experimentalTransientRamBytesDuringLoad_ = experimentalTransientRamBytesDuringLoad_;
if (((bitField0_ & 0x00000020) == 0x00000020)) {
savedModelTags_ = savedModelTags_.getUnmodifiableView();
bitField0_ = (bitField0_ & ~0x00000020);
}
result.savedModelTags_ = savedModelTags_;
if (experimentalFixedInputTensorsBuilder_ == null) {
if (((bitField0_ & 0x00000040) == 0x00000040)) {
experimentalFixedInputTensors_ = java.util.Collections.unmodifiableList(experimentalFixedInputTensors_);
bitField0_ = (bitField0_ & ~0x00000040);
}
result.experimentalFixedInputTensors_ = experimentalFixedInputTensors_;
} else {
result.experimentalFixedInputTensors_ = experimentalFixedInputTensorsBuilder_.build();
}
result.enableModelWarmup_ = enableModelWarmup_;
// Cleanup: the generator's locals `from_bitField0_` (never read) and
// `to_bitField0_` (constant 0) were removed. No singular field here uses
// presence bits, so the built message's bitField0_ is always zero.
result.bitField0_ = 0;
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
// Copies builder state via the superclass; cast narrows the return type.
return (Builder) super.clone();
}
@java.lang.Override
public Builder setField(
com.google.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
// Reflective setter; cast only narrows the fluent return type.
return (Builder) super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
com.google.protobuf.Descriptors.FieldDescriptor field) {
// Reflective clear; cast only narrows the fluent return type.
return (Builder) super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
com.google.protobuf.Descriptors.OneofDescriptor oneof) {
// Reflective oneof clear; cast only narrows the fluent return type.
return (Builder) super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
com.google.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
// Reflective indexed set on a repeated field.
return (Builder) super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
com.google.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
// Reflective append to a repeated field.
return (Builder) super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(com.google.protobuf.Message other) {
// Use the typed merge when possible; otherwise fall back to the
// reflective field-by-field merge in the superclass.
if (!(other instanceof tensorflow.serving.SessionBundleConfigOuterClass.SessionBundleConfig)) {
super.mergeFrom(other);
return this;
}
return mergeFrom((tensorflow.serving.SessionBundleConfigOuterClass.SessionBundleConfig) other);
}
public Builder mergeFrom(tensorflow.serving.SessionBundleConfigOuterClass.SessionBundleConfig other) {
// Proto3 merge semantics: scalars copy only when set to a non-default
// value in `other`; message fields merge recursively; repeated fields
// are concatenated.
if (other == tensorflow.serving.SessionBundleConfigOuterClass.SessionBundleConfig.getDefaultInstance()) return this;
if (!other.getSessionTarget().isEmpty()) {
sessionTarget_ = other.sessionTarget_;
onChanged();
}
if (other.hasSessionConfig()) {
mergeSessionConfig(other.getSessionConfig());
}
if (other.hasBatchingParameters()) {
mergeBatchingParameters(other.getBatchingParameters());
}
if (other.hasSessionRunLoadThreadpoolIndex()) {
mergeSessionRunLoadThreadpoolIndex(other.getSessionRunLoadThreadpoolIndex());
}
if (other.getExperimentalTransientRamBytesDuringLoad() != 0L) {
setExperimentalTransientRamBytesDuringLoad(other.getExperimentalTransientRamBytesDuringLoad());
}
if (!other.savedModelTags_.isEmpty()) {
// If our list is still empty, adopt other's (immutable) list and clear
// the mutability bit; otherwise copy-on-write and append.
if (savedModelTags_.isEmpty()) {
savedModelTags_ = other.savedModelTags_;
bitField0_ = (bitField0_ & ~0x00000020);
} else {
ensureSavedModelTagsIsMutable();
savedModelTags_.addAll(other.savedModelTags_);
}
onChanged();
}
if (experimentalFixedInputTensorsBuilder_ == null) {
if (!other.experimentalFixedInputTensors_.isEmpty()) {
if (experimentalFixedInputTensors_.isEmpty()) {
experimentalFixedInputTensors_ = other.experimentalFixedInputTensors_;
bitField0_ = (bitField0_ & ~0x00000040);
} else {
ensureExperimentalFixedInputTensorsIsMutable();
experimentalFixedInputTensors_.addAll(other.experimentalFixedInputTensors_);
}
onChanged();
}
} else {
if (!other.experimentalFixedInputTensors_.isEmpty()) {
if (experimentalFixedInputTensorsBuilder_.isEmpty()) {
// Adopt other's list wholesale: drop the empty field builder and
// recreate it lazily (eagerly when alwaysUseFieldBuilders).
experimentalFixedInputTensorsBuilder_.dispose();
experimentalFixedInputTensorsBuilder_ = null;
experimentalFixedInputTensors_ = other.experimentalFixedInputTensors_;
bitField0_ = (bitField0_ & ~0x00000040);
experimentalFixedInputTensorsBuilder_ =
com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
getExperimentalFixedInputTensorsFieldBuilder() : null;
} else {
experimentalFixedInputTensorsBuilder_.addAllMessages(other.experimentalFixedInputTensors_);
}
}
}
if (other.getEnableModelWarmup() != false) {
setEnableModelWarmup(other.getEnableModelWarmup());
}
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
// Proto3 message with no required fields: always initialized.
return true;
}
@java.lang.Override
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
tensorflow.serving.SessionBundleConfigOuterClass.SessionBundleConfig parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
// Keep whatever was parsed before the failure so the finally-block
// merge still applies it, then rethrow as a plain IOException.
parsedMessage = (tensorflow.serving.SessionBundleConfigOuterClass.SessionBundleConfig) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
// Tracks mutability of the repeated fields (0x20: saved_model_tags,
// 0x40: experimental_fixed_input_tensors) — see clear()/buildPartial().
private int bitField0_;
// Holds either a String or a ByteString; decoded lazily by getSessionTarget().
private java.lang.Object sessionTarget_ = "";
/**
*
* The TensorFlow runtime to connect to.
* See full documentation in tensorflow/core/public/session_options.h.
* For single machine serving, we recommend using the empty string "", which
* will configure the local TensorFlow runtime implementation. This provides
* the best isolation currently available across multiple Session servables.
*
*
* string session_target = 1;
*/
public java.lang.String getSessionTarget() {
// The backing field holds either a String or a ByteString; decode the
// ByteString form lazily and cache the result.
java.lang.Object current = sessionTarget_;
if (current instanceof java.lang.String) {
return (java.lang.String) current;
}
com.google.protobuf.ByteString bytes =
(com.google.protobuf.ByteString) current;
java.lang.String decoded = bytes.toStringUtf8();
sessionTarget_ = decoded;
return decoded;
}
/**
*
* The TensorFlow runtime to connect to.
* See full documentation in tensorflow/core/public/session_options.h.
* For single machine serving, we recommend using the empty string "", which
* will configure the local TensorFlow runtime implementation. This provides
* the best isolation currently available across multiple Session servables.
*
*
* string session_target = 1;
*/
public com.google.protobuf.ByteString
getSessionTargetBytes() {
// Mirror of getSessionTarget(): encode the String form lazily and cache
// the ByteString back into the field.
java.lang.Object current = sessionTarget_;
if (!(current instanceof String)) {
return (com.google.protobuf.ByteString) current;
}
com.google.protobuf.ByteString encoded =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) current);
sessionTarget_ = encoded;
return encoded;
}
/**
*
* The TensorFlow runtime to connect to.
* See full documentation in tensorflow/core/public/session_options.h.
* For single machine serving, we recommend using the empty string "", which
* will configure the local TensorFlow runtime implementation. This provides
* the best isolation currently available across multiple Session servables.
*
*
* string session_target = 1;
*/
public Builder setSessionTarget(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
// Store the raw String; getSessionTargetBytes() encodes on demand.
sessionTarget_ = value;
onChanged();
return this;
}
/**
*
* The TensorFlow runtime to connect to.
* See full documentation in tensorflow/core/public/session_options.h.
* For single machine serving, we recommend using the empty string "", which
* will configure the local TensorFlow runtime implementation. This provides
* the best isolation currently available across multiple Session servables.
*
*
* string session_target = 1;
*/
public Builder clearSessionTarget() {
// Resets to the proto3 string default ("") from the default instance.
sessionTarget_ = getDefaultInstance().getSessionTarget();
onChanged();
return this;
}
/**
*
* The TensorFlow runtime to connect to.
* See full documentation in tensorflow/core/public/session_options.h.
* For single machine serving, we recommend using the empty string "", which
* will configure the local TensorFlow runtime implementation. This provides
* the best isolation currently available across multiple Session servables.
*
*
* string session_target = 1;
*/
public Builder setSessionTargetBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
// Rejects invalid UTF-8 before storing the raw bytes.
checkByteStringIsUtf8(value);
sessionTarget_ = value;
onChanged();
return this;
}
// Raw value for session_config; null means unset. Once a field builder
// exists (below), it becomes the source of truth instead.
private org.tensorflow.framework.ConfigProto sessionConfig_ = null;
private com.google.protobuf.SingleFieldBuilderV3<
org.tensorflow.framework.ConfigProto, org.tensorflow.framework.ConfigProto.Builder, org.tensorflow.framework.ConfigProtoOrBuilder> sessionConfigBuilder_;
/**
*
* TensorFlow Session configuration options.
* See details at tensorflow/core/protobuf/config.proto.
*
*
* .tensorflow.ConfigProto session_config = 2;
*/
public boolean hasSessionConfig() {
// Presence = a raw value or a nested builder exists (no has-bit).
return sessionConfigBuilder_ != null || sessionConfig_ != null;
}
/**
*
* TensorFlow Session configuration options.
* See details at tensorflow/core/protobuf/config.proto.
*
*
* .tensorflow.ConfigProto session_config = 2;
*/
public org.tensorflow.framework.ConfigProto getSessionConfig() {
// Never returns null: yields the default instance when unset.
if (sessionConfigBuilder_ == null) {
return sessionConfig_ == null ? org.tensorflow.framework.ConfigProto.getDefaultInstance() : sessionConfig_;
} else {
return sessionConfigBuilder_.getMessage();
}
}
/**
*
* TensorFlow Session configuration options.
* See details at tensorflow/core/protobuf/config.proto.
*
*
* .tensorflow.ConfigProto session_config = 2;
*/
public Builder setSessionConfig(org.tensorflow.framework.ConfigProto value) {
// Routes through the nested builder when one exists; otherwise stores
// the value directly and signals the change.
if (sessionConfigBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
sessionConfig_ = value;
onChanged();
} else {
sessionConfigBuilder_.setMessage(value);
}
return this;
}
/**
*
* TensorFlow Session configuration options.
* See details at tensorflow/core/protobuf/config.proto.
*
*
* .tensorflow.ConfigProto session_config = 2;
*/
public Builder setSessionConfig(
org.tensorflow.framework.ConfigProto.Builder builderForValue) {
// Convenience overload: builds the sub-message from its builder.
if (sessionConfigBuilder_ == null) {
sessionConfig_ = builderForValue.build();
onChanged();
} else {
sessionConfigBuilder_.setMessage(builderForValue.build());
}
return this;
}
/**
*
* TensorFlow Session configuration options.
* See details at tensorflow/core/protobuf/config.proto.
*
*
* .tensorflow.ConfigProto session_config = 2;
*/
public Builder mergeSessionConfig(org.tensorflow.framework.ConfigProto value) {
// Field-merges into the existing value if present; otherwise adopts
// `value` as-is.
if (sessionConfigBuilder_ == null) {
if (sessionConfig_ != null) {
sessionConfig_ =
org.tensorflow.framework.ConfigProto.newBuilder(sessionConfig_).mergeFrom(value).buildPartial();
} else {
sessionConfig_ = value;
}
onChanged();
} else {
sessionConfigBuilder_.mergeFrom(value);
}
return this;
}
/**
*
* TensorFlow Session configuration options.
* See details at tensorflow/core/protobuf/config.proto.
*
*
* .tensorflow.ConfigProto session_config = 2;
*/
public Builder clearSessionConfig() {
// Drops both the raw value and any nested builder; field reads as unset.
if (sessionConfigBuilder_ == null) {
sessionConfig_ = null;
onChanged();
} else {
sessionConfig_ = null;
sessionConfigBuilder_ = null;
}
return this;
}
/**
*
* TensorFlow Session configuration options.
* See details at tensorflow/core/protobuf/config.proto.
*
*
* .tensorflow.ConfigProto session_config = 2;
*/
public org.tensorflow.framework.ConfigProto.Builder getSessionConfigBuilder() {
// Forces the field into builder form (marking it present) for in-place
// mutation; onChanged() flags the builder dirty up-front.
onChanged();
return getSessionConfigFieldBuilder().getBuilder();
}
/**
*
* TensorFlow Session configuration options.
* See details at tensorflow/core/protobuf/config.proto.
*
*
* .tensorflow.ConfigProto session_config = 2;
*/
public org.tensorflow.framework.ConfigProtoOrBuilder getSessionConfigOrBuilder() {
// Read-only view that avoids forcing a builder into existence.
if (sessionConfigBuilder_ != null) {
return sessionConfigBuilder_.getMessageOrBuilder();
} else {
return sessionConfig_ == null ?
org.tensorflow.framework.ConfigProto.getDefaultInstance() : sessionConfig_;
}
}
/**
*
* TensorFlow Session configuration options.
* See details at tensorflow/core/protobuf/config.proto.
*
*
* .tensorflow.ConfigProto session_config = 2;
*/
private com.google.protobuf.SingleFieldBuilderV3<
org.tensorflow.framework.ConfigProto, org.tensorflow.framework.ConfigProto.Builder, org.tensorflow.framework.ConfigProtoOrBuilder>
getSessionConfigFieldBuilder() {
// Lazily creates the single-field builder seeded with the current
// value; the raw field is then nulled because the builder becomes the
// sole source of truth for session_config.
if (sessionConfigBuilder_ == null) {
sessionConfigBuilder_ = new com.google.protobuf.SingleFieldBuilderV3<
org.tensorflow.framework.ConfigProto, org.tensorflow.framework.ConfigProto.Builder, org.tensorflow.framework.ConfigProtoOrBuilder>(
getSessionConfig(),
getParentForChildren(),
isClean());
sessionConfig_ = null;
}
return sessionConfigBuilder_;
}
// Raw value for batching_parameters; null means unset. The field builder
// below, once created, supersedes the raw value.
private tensorflow.serving.SessionBundleConfigOuterClass.BatchingParameters batchingParameters_ = null;
private com.google.protobuf.SingleFieldBuilderV3<
tensorflow.serving.SessionBundleConfigOuterClass.BatchingParameters, tensorflow.serving.SessionBundleConfigOuterClass.BatchingParameters.Builder, tensorflow.serving.SessionBundleConfigOuterClass.BatchingParametersOrBuilder> batchingParametersBuilder_;
/**
*
* If set, each emitted session is wrapped with a layer that schedules Run()
* calls in batches. The batching layer is transparent to the client
* (implements the tensorflow::Session API).
* IMPORTANT: With batching enabled, client threads will spend most of their
* time blocked on Session::Run() calls, waiting for enough peer threads to
* also call Session::Run() such that a large batch can be formed. For good
* throughput, we recommend setting the number of client threads equal to
* roughly twice the maximum batch size ('max_batch_size' below).
* The batching layer uses a SharedBatchScheduler to coordinate batching
* across multiple session servables emitted by this source adapter. A
* BatchSchedulerRetrier is added on top of each batching session.
*
*
* .tensorflow.serving.BatchingParameters batching_parameters = 3;
*/
public boolean hasBatchingParameters() {
// Presence = a raw value or a nested builder exists (no has-bit).
return batchingParametersBuilder_ != null || batchingParameters_ != null;
}
/**
*
* If set, each emitted session is wrapped with a layer that schedules Run()
* calls in batches. The batching layer is transparent to the client
* (implements the tensorflow::Session API).
* IMPORTANT: With batching enabled, client threads will spend most of their
* time blocked on Session::Run() calls, waiting for enough peer threads to
* also call Session::Run() such that a large batch can be formed. For good
* throughput, we recommend setting the number of client threads equal to
* roughly twice the maximum batch size ('max_batch_size' below).
* The batching layer uses a SharedBatchScheduler to coordinate batching
* across multiple session servables emitted by this source adapter. A
* BatchSchedulerRetrier is added on top of each batching session.
*
*
* .tensorflow.serving.BatchingParameters batching_parameters = 3;
*/
public tensorflow.serving.SessionBundleConfigOuterClass.BatchingParameters getBatchingParameters() {
// Never returns null: yields the default instance when unset.
if (batchingParametersBuilder_ == null) {
return batchingParameters_ == null ? tensorflow.serving.SessionBundleConfigOuterClass.BatchingParameters.getDefaultInstance() : batchingParameters_;
} else {
return batchingParametersBuilder_.getMessage();
}
}
/**
*
* If set, each emitted session is wrapped with a layer that schedules Run()
* calls in batches. The batching layer is transparent to the client
* (implements the tensorflow::Session API).
* IMPORTANT: With batching enabled, client threads will spend most of their
* time blocked on Session::Run() calls, waiting for enough peer threads to
* also call Session::Run() such that a large batch can be formed. For good
* throughput, we recommend setting the number of client threads equal to
* roughly twice the maximum batch size ('max_batch_size' below).
* The batching layer uses a SharedBatchScheduler to coordinate batching
* across multiple session servables emitted by this source adapter. A
* BatchSchedulerRetrier is added on top of each batching session.
*
*
* .tensorflow.serving.BatchingParameters batching_parameters = 3;
*/
public Builder setBatchingParameters(tensorflow.serving.SessionBundleConfigOuterClass.BatchingParameters value) {
// Routes through the nested builder when one exists; otherwise stores
// the value directly and signals the change.
if (batchingParametersBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
batchingParameters_ = value;
onChanged();
} else {
batchingParametersBuilder_.setMessage(value);
}
return this;
}
/**
*
* If set, each emitted session is wrapped with a layer that schedules Run()
* calls in batches. The batching layer is transparent to the client
* (implements the tensorflow::Session API).
* IMPORTANT: With batching enabled, client threads will spend most of their
* time blocked on Session::Run() calls, waiting for enough peer threads to
* also call Session::Run() such that a large batch can be formed. For good
* throughput, we recommend setting the number of client threads equal to
* roughly twice the maximum batch size ('max_batch_size' below).
* The batching layer uses a SharedBatchScheduler to coordinate batching
* across multiple session servables emitted by this source adapter. A
* BatchSchedulerRetrier is added on top of each batching session.
*
*
* .tensorflow.serving.BatchingParameters batching_parameters = 3;
*/
public Builder setBatchingParameters(
tensorflow.serving.SessionBundleConfigOuterClass.BatchingParameters.Builder builderForValue) {
// Convenience overload: builds the sub-message from its builder.
if (batchingParametersBuilder_ == null) {
batchingParameters_ = builderForValue.build();
onChanged();
} else {
batchingParametersBuilder_.setMessage(builderForValue.build());
}
return this;
}
/**
*
* If set, each emitted session is wrapped with a layer that schedules Run()
* calls in batches. The batching layer is transparent to the client
* (implements the tensorflow::Session API).
* IMPORTANT: With batching enabled, client threads will spend most of their
* time blocked on Session::Run() calls, waiting for enough peer threads to
* also call Session::Run() such that a large batch can be formed. For good
* throughput, we recommend setting the number of client threads equal to
* roughly twice the maximum batch size ('max_batch_size' below).
* The batching layer uses a SharedBatchScheduler to coordinate batching
* across multiple session servables emitted by this source adapter. A
* BatchSchedulerRetrier is added on top of each batching session.
*
*
* .tensorflow.serving.BatchingParameters batching_parameters = 3;
*/
public Builder mergeBatchingParameters(tensorflow.serving.SessionBundleConfigOuterClass.BatchingParameters value) {
// Field-merges into the existing value if present; otherwise adopts
// `value` as-is.
if (batchingParametersBuilder_ == null) {
if (batchingParameters_ != null) {
batchingParameters_ =
tensorflow.serving.SessionBundleConfigOuterClass.BatchingParameters.newBuilder(batchingParameters_).mergeFrom(value).buildPartial();
} else {
batchingParameters_ = value;
}
onChanged();
} else {
batchingParametersBuilder_.mergeFrom(value);
}
return this;
}
/**
*
* If set, each emitted session is wrapped with a layer that schedules Run()
* calls in batches. The batching layer is transparent to the client
* (implements the tensorflow::Session API).
* IMPORTANT: With batching enabled, client threads will spend most of their
* time blocked on Session::Run() calls, waiting for enough peer threads to
* also call Session::Run() such that a large batch can be formed. For good
* throughput, we recommend setting the number of client threads equal to
* roughly twice the maximum batch size ('max_batch_size' below).
* The batching layer uses a SharedBatchScheduler to coordinate batching
* across multiple session servables emitted by this source adapter. A
* BatchSchedulerRetrier is added on top of each batching session.
*
*
* .tensorflow.serving.BatchingParameters batching_parameters = 3;
*/
public Builder clearBatchingParameters() {
// Drops both the raw value and any nested builder; field reads as unset.
if (batchingParametersBuilder_ == null) {
batchingParameters_ = null;
onChanged();
} else {
batchingParameters_ = null;
batchingParametersBuilder_ = null;
}
return this;
}
/**
*
* If set, each emitted session is wrapped with a layer that schedules Run()
* calls in batches. The batching layer is transparent to the client
* (implements the tensorflow::Session API).
* IMPORTANT: With batching enabled, client threads will spend most of their
* time blocked on Session::Run() calls, waiting for enough peer threads to
* also call Session::Run() such that a large batch can be formed. For good
* throughput, we recommend setting the number of client threads equal to
* roughly twice the maximum batch size ('max_batch_size' below).
* The batching layer uses a SharedBatchScheduler to coordinate batching
* across multiple session servables emitted by this source adapter. A
* BatchSchedulerRetrier is added on top of each batching session.
*
*
* .tensorflow.serving.BatchingParameters batching_parameters = 3;
*/
public tensorflow.serving.SessionBundleConfigOuterClass.BatchingParameters.Builder getBatchingParametersBuilder() {
// Forces the field into builder form (marking it present) for in-place
// mutation; onChanged() flags the builder dirty up-front.
onChanged();
return getBatchingParametersFieldBuilder().getBuilder();
}
/**
*
* If set, each emitted session is wrapped with a layer that schedules Run()
* calls in batches. The batching layer is transparent to the client
* (implements the tensorflow::Session API).
* IMPORTANT: With batching enabled, client threads will spend most of their
* time blocked on Session::Run() calls, waiting for enough peer threads to
* also call Session::Run() such that a large batch can be formed. For good
* throughput, we recommend setting the number of client threads equal to
* roughly twice the maximum batch size ('max_batch_size' below).
* The batching layer uses a SharedBatchScheduler to coordinate batching
* across multiple session servables emitted by this source adapter. A
* BatchSchedulerRetrier is added on top of each batching session.
*
*
* .tensorflow.serving.BatchingParameters batching_parameters = 3;
*/
public tensorflow.serving.SessionBundleConfigOuterClass.BatchingParametersOrBuilder getBatchingParametersOrBuilder() {
// Read-only view that avoids forcing a builder into existence.
if (batchingParametersBuilder_ != null) {
return batchingParametersBuilder_.getMessageOrBuilder();
} else {
return batchingParameters_ == null ?
tensorflow.serving.SessionBundleConfigOuterClass.BatchingParameters.getDefaultInstance() : batchingParameters_;
}
}
/**
*
* If set, each emitted session is wrapped with a layer that schedules Run()
* calls in batches. The batching layer is transparent to the client
* (implements the tensorflow::Session API).
* IMPORTANT: With batching enabled, client threads will spend most of their
* time blocked on Session::Run() calls, waiting for enough peer threads to
* also call Session::Run() such that a large batch can be formed. For good
* throughput, we recommend setting the number of client threads equal to
* roughly twice the maximum batch size ('max_batch_size' below).
* The batching layer uses a SharedBatchScheduler to coordinate batching
* across multiple session servables emitted by this source adapter. A
* BatchSchedulerRetrier is added on top of each batching session.
*
*
* .tensorflow.serving.BatchingParameters batching_parameters = 3;
*/
private com.google.protobuf.SingleFieldBuilderV3<
tensorflow.serving.SessionBundleConfigOuterClass.BatchingParameters, tensorflow.serving.SessionBundleConfigOuterClass.BatchingParameters.Builder, tensorflow.serving.SessionBundleConfigOuterClass.BatchingParametersOrBuilder>
getBatchingParametersFieldBuilder() {
// Lazily creates the single-field builder seeded with the current
// value; the raw field is then nulled because the builder becomes the
// sole source of truth for batching_parameters.
if (batchingParametersBuilder_ == null) {
batchingParametersBuilder_ = new com.google.protobuf.SingleFieldBuilderV3<
tensorflow.serving.SessionBundleConfigOuterClass.BatchingParameters, tensorflow.serving.SessionBundleConfigOuterClass.BatchingParameters.Builder, tensorflow.serving.SessionBundleConfigOuterClass.BatchingParametersOrBuilder>(
getBatchingParameters(),
getParentForChildren(),
isClean());
batchingParameters_ = null;
}
return batchingParametersBuilder_;
}
// Raw value for session_run_load_threadpool_index (Int32Value wrapper so
// "unset" is distinguishable from 0); the field builder supersedes it.
private com.google.protobuf.Int32Value sessionRunLoadThreadpoolIndex_ = null;
private com.google.protobuf.SingleFieldBuilderV3<
com.google.protobuf.Int32Value, com.google.protobuf.Int32Value.Builder, com.google.protobuf.Int32ValueOrBuilder> sessionRunLoadThreadpoolIndexBuilder_;
/**
*
* If set, session run calls use a separate threadpool for restore and init
* ops as part of loading the session-bundle. The value of this field should
* correspond to the index of the tensorflow::ThreadPoolOptionProto defined as
* part of `session_config.session_inter_op_thread_pool`.
*
*
* .google.protobuf.Int32Value session_run_load_threadpool_index = 4;
*/
public boolean hasSessionRunLoadThreadpoolIndex() {
// Presence = a raw value or a nested builder exists (no has-bit).
return sessionRunLoadThreadpoolIndexBuilder_ != null || sessionRunLoadThreadpoolIndex_ != null;
}
/**
*
* If set, session run calls use a separate threadpool for restore and init
* ops as part of loading the session-bundle. The value of this field should
* correspond to the index of the tensorflow::ThreadPoolOptionProto defined as
* part of `session_config.session_inter_op_thread_pool`.
*
*
* .google.protobuf.Int32Value session_run_load_threadpool_index = 4;
*/
public com.google.protobuf.Int32Value getSessionRunLoadThreadpoolIndex() {
if (sessionRunLoadThreadpoolIndexBuilder_ == null) {
return sessionRunLoadThreadpoolIndex_ == null ? com.google.protobuf.Int32Value.getDefaultInstance() : sessionRunLoadThreadpoolIndex_;
} else {
return sessionRunLoadThreadpoolIndexBuilder_.getMessage();
}
}
/**
*
* If set, session run calls use a separate threadpool for restore and init
* ops as part of loading the session-bundle. The value of this field should
* correspond to the index of the tensorflow::ThreadPoolOptionProto defined as
* part of `session_config.session_inter_op_thread_pool`.
*
*
* .google.protobuf.Int32Value session_run_load_threadpool_index = 4;
*/
public Builder setSessionRunLoadThreadpoolIndex(com.google.protobuf.Int32Value value) {
if (sessionRunLoadThreadpoolIndexBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
sessionRunLoadThreadpoolIndex_ = value;
onChanged();
} else {
sessionRunLoadThreadpoolIndexBuilder_.setMessage(value);
}
return this;
}
/**
*
* If set, session run calls use a separate threadpool for restore and init
* ops as part of loading the session-bundle. The value of this field should
* correspond to the index of the tensorflow::ThreadPoolOptionProto defined as
* part of `session_config.session_inter_op_thread_pool`.
*
*
* .google.protobuf.Int32Value session_run_load_threadpool_index = 4;
*/
public Builder setSessionRunLoadThreadpoolIndex(
com.google.protobuf.Int32Value.Builder builderForValue) {
if (sessionRunLoadThreadpoolIndexBuilder_ == null) {
sessionRunLoadThreadpoolIndex_ = builderForValue.build();
onChanged();
} else {
sessionRunLoadThreadpoolIndexBuilder_.setMessage(builderForValue.build());
}
return this;
}
/**
*
* If set, session run calls use a separate threadpool for restore and init
* ops as part of loading the session-bundle. The value of this field should
* correspond to the index of the tensorflow::ThreadPoolOptionProto defined as
* part of `session_config.session_inter_op_thread_pool`.
*
*
* .google.protobuf.Int32Value session_run_load_threadpool_index = 4;
*/
public Builder mergeSessionRunLoadThreadpoolIndex(com.google.protobuf.Int32Value value) {
if (sessionRunLoadThreadpoolIndexBuilder_ == null) {
if (sessionRunLoadThreadpoolIndex_ != null) {
sessionRunLoadThreadpoolIndex_ =
com.google.protobuf.Int32Value.newBuilder(sessionRunLoadThreadpoolIndex_).mergeFrom(value).buildPartial();
} else {
sessionRunLoadThreadpoolIndex_ = value;
}
onChanged();
} else {
sessionRunLoadThreadpoolIndexBuilder_.mergeFrom(value);
}
return this;
}
/**
*
* If set, session run calls use a separate threadpool for restore and init
* ops as part of loading the session-bundle. The value of this field should
* correspond to the index of the tensorflow::ThreadPoolOptionProto defined as
* part of `session_config.session_inter_op_thread_pool`.
*
*
* .google.protobuf.Int32Value session_run_load_threadpool_index = 4;
*/
public Builder clearSessionRunLoadThreadpoolIndex() {
if (sessionRunLoadThreadpoolIndexBuilder_ == null) {
sessionRunLoadThreadpoolIndex_ = null;
onChanged();
} else {
sessionRunLoadThreadpoolIndex_ = null;
sessionRunLoadThreadpoolIndexBuilder_ = null;
}
return this;
}
/**
*
* If set, session run calls use a separate threadpool for restore and init
* ops as part of loading the session-bundle. The value of this field should
* correspond to the index of the tensorflow::ThreadPoolOptionProto defined as
* part of `session_config.session_inter_op_thread_pool`.
*
*
* .google.protobuf.Int32Value session_run_load_threadpool_index = 4;
*/
public com.google.protobuf.Int32Value.Builder getSessionRunLoadThreadpoolIndexBuilder() {
onChanged();
return getSessionRunLoadThreadpoolIndexFieldBuilder().getBuilder();
}
/**
*
* If set, session run calls use a separate threadpool for restore and init
* ops as part of loading the session-bundle. The value of this field should
* correspond to the index of the tensorflow::ThreadPoolOptionProto defined as
* part of `session_config.session_inter_op_thread_pool`.
*
*
* .google.protobuf.Int32Value session_run_load_threadpool_index = 4;
*/
public com.google.protobuf.Int32ValueOrBuilder getSessionRunLoadThreadpoolIndexOrBuilder() {
if (sessionRunLoadThreadpoolIndexBuilder_ != null) {
return sessionRunLoadThreadpoolIndexBuilder_.getMessageOrBuilder();
} else {
return sessionRunLoadThreadpoolIndex_ == null ?
com.google.protobuf.Int32Value.getDefaultInstance() : sessionRunLoadThreadpoolIndex_;
}
}
/**
*
* If set, session run calls use a separate threadpool for restore and init
* ops as part of loading the session-bundle. The value of this field should
* correspond to the index of the tensorflow::ThreadPoolOptionProto defined as
* part of `session_config.session_inter_op_thread_pool`.
*
*
* .google.protobuf.Int32Value session_run_load_threadpool_index = 4;
*/
private com.google.protobuf.SingleFieldBuilderV3<
com.google.protobuf.Int32Value, com.google.protobuf.Int32Value.Builder, com.google.protobuf.Int32ValueOrBuilder>
getSessionRunLoadThreadpoolIndexFieldBuilder() {
if (sessionRunLoadThreadpoolIndexBuilder_ == null) {
sessionRunLoadThreadpoolIndexBuilder_ = new com.google.protobuf.SingleFieldBuilderV3<
com.google.protobuf.Int32Value, com.google.protobuf.Int32Value.Builder, com.google.protobuf.Int32ValueOrBuilder>(
getSessionRunLoadThreadpoolIndex(),
getParentForChildren(),
isClean());
sessionRunLoadThreadpoolIndex_ = null;
}
return sessionRunLoadThreadpoolIndexBuilder_;
}
// Scalar uint64 field 5; proto3 scalars have no presence bit, so 0 doubles
// as "unset".
private long experimentalTransientRamBytesDuringLoad_ ;
/**
*
* EXPERIMENTAL. THIS FIELD MAY CHANGE OR GO AWAY. USE WITH CAUTION.
* Transient memory used while loading a model, which is released once the
* loading phase has completed. (This is on top of the memory used in steady-
* state while the model is in memory after it has finished loading.)
* TODO(b/38376838): This is a temporary hack, and it applies to all models.
* Remove it once resource estimates are moved inside SavedModel.
*
*
* uint64 experimental_transient_ram_bytes_during_load = 5;
*/
public long getExperimentalTransientRamBytesDuringLoad() {
return experimentalTransientRamBytesDuringLoad_;
}
/**
*
* EXPERIMENTAL. THIS FIELD MAY CHANGE OR GO AWAY. USE WITH CAUTION.
* Transient memory used while loading a model, which is released once the
* loading phase has completed. (This is on top of the memory used in steady-
* state while the model is in memory after it has finished loading.)
* TODO(b/38376838): This is a temporary hack, and it applies to all models.
* Remove it once resource estimates are moved inside SavedModel.
*
*
* uint64 experimental_transient_ram_bytes_during_load = 5;
*/
public Builder setExperimentalTransientRamBytesDuringLoad(long value) {
experimentalTransientRamBytesDuringLoad_ = value;
onChanged();
return this;
}
/**
*
* EXPERIMENTAL. THIS FIELD MAY CHANGE OR GO AWAY. USE WITH CAUTION.
* Transient memory used while loading a model, which is released once the
* loading phase has completed. (This is on top of the memory used in steady-
* state while the model is in memory after it has finished loading.)
* TODO(b/38376838): This is a temporary hack, and it applies to all models.
* Remove it once resource estimates are moved inside SavedModel.
*
*
* uint64 experimental_transient_ram_bytes_during_load = 5;
*/
public Builder clearExperimentalTransientRamBytesDuringLoad() {
experimentalTransientRamBytesDuringLoad_ = 0L;
onChanged();
return this;
}
// Repeated string field 6. Starts as the shared immutable EMPTY list and is
// copied on first mutation; bit 0x20 of bitField0_ records that the builder
// owns a mutable copy.
private com.google.protobuf.LazyStringList savedModelTags_ = com.google.protobuf.LazyStringArrayList.EMPTY;
private void ensureSavedModelTagsIsMutable() {
if (!((bitField0_ & 0x00000020) == 0x00000020)) {
savedModelTags_ = new com.google.protobuf.LazyStringArrayList(savedModelTags_);
bitField0_ |= 0x00000020;
}
}
/**
*
* Set of SavedModel tags identifying the specific meta graph def to be
* loaded.
*
*
* repeated string saved_model_tags = 6;
*/
public com.google.protobuf.ProtocolStringList
getSavedModelTagsList() {
return savedModelTags_.getUnmodifiableView();
}
/**
*
* Set of SavedModel tags identifying the specific meta graph def to be
* loaded.
*
*
* repeated string saved_model_tags = 6;
*/
public int getSavedModelTagsCount() {
return savedModelTags_.size();
}
/**
*
* Set of SavedModel tags identifying the specific meta graph def to be
* loaded.
*
*
* repeated string saved_model_tags = 6;
*/
public java.lang.String getSavedModelTags(int index) {
return savedModelTags_.get(index);
}
/**
*
* Set of SavedModel tags identifying the specific meta graph def to be
* loaded.
*
*
* repeated string saved_model_tags = 6;
*/
public com.google.protobuf.ByteString
getSavedModelTagsBytes(int index) {
return savedModelTags_.getByteString(index);
}
/**
*
* Set of SavedModel tags identifying the specific meta graph def to be
* loaded.
*
*
* repeated string saved_model_tags = 6;
*/
public Builder setSavedModelTags(
int index, java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
ensureSavedModelTagsIsMutable();
savedModelTags_.set(index, value);
onChanged();
return this;
}
/**
*
* Set of SavedModel tags identifying the specific meta graph def to be
* loaded.
*
*
* repeated string saved_model_tags = 6;
*/
public Builder addSavedModelTags(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
ensureSavedModelTagsIsMutable();
savedModelTags_.add(value);
onChanged();
return this;
}
/**
*
* Set of SavedModel tags identifying the specific meta graph def to be
* loaded.
*
*
* repeated string saved_model_tags = 6;
*/
// Fix: the element type argument of Iterable was lost (HTML-stripped from
// the generated source); protoc emits Iterable&lt;java.lang.String&gt; here.
public Builder addAllSavedModelTags(
java.lang.Iterable<java.lang.String> values) {
ensureSavedModelTagsIsMutable();
com.google.protobuf.AbstractMessageLite.Builder.addAll(
values, savedModelTags_);
onChanged();
return this;
}
/**
*
* Set of SavedModel tags identifying the specific meta graph def to be
* loaded.
*
*
* repeated string saved_model_tags = 6;
*/
public Builder clearSavedModelTags() {
// Reset to the shared immutable empty list and drop the "mutable copy
// exists" bit (0x20).
savedModelTags_ = com.google.protobuf.LazyStringArrayList.EMPTY;
bitField0_ = (bitField0_ & ~0x00000020);
onChanged();
return this;
}
/**
*
* Set of SavedModel tags identifying the specific meta graph def to be
* loaded.
*
*
* repeated string saved_model_tags = 6;
*/
public Builder addSavedModelTagsBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
checkByteStringIsUtf8(value);
ensureSavedModelTagsIsMutable();
savedModelTags_.add(value);
onChanged();
return this;
}
// Fix: the generic type arguments on the List field and its ArrayList copy
// were lost (HTML-stripped); restored to the canonical protoc output.
// Repeated message field 778; copy-on-write guarded by bit 0x40 of bitField0_.
private java.util.List<org.tensorflow.framework.NamedTensorProto> experimentalFixedInputTensors_ =
java.util.Collections.emptyList();
private void ensureExperimentalFixedInputTensorsIsMutable() {
if (!((bitField0_ & 0x00000040) == 0x00000040)) {
experimentalFixedInputTensors_ = new java.util.ArrayList<org.tensorflow.framework.NamedTensorProto>(experimentalFixedInputTensors_);
bitField0_ |= 0x00000040;
}
}
private com.google.protobuf.RepeatedFieldBuilderV3<
org.tensorflow.framework.NamedTensorProto, org.tensorflow.framework.NamedTensorProto.Builder, org.tensorflow.framework.NamedTensorProtoOrBuilder> experimentalFixedInputTensorsBuilder_;
/**
*
* EXPERIMENTAL. THIS FIELD MAY CHANGE OR GO AWAY. USE WITH CAUTION.
* Input tensors to append to every Session::Run() call.
*
*
* repeated .tensorflow.NamedTensorProto experimental_fixed_input_tensors = 778;
*/
// Fix: the return type's element type argument was lost (HTML-stripped);
// protoc emits List&lt;NamedTensorProto&gt; here.
public java.util.List<org.tensorflow.framework.NamedTensorProto> getExperimentalFixedInputTensorsList() {
if (experimentalFixedInputTensorsBuilder_ == null) {
return java.util.Collections.unmodifiableList(experimentalFixedInputTensors_);
} else {
return experimentalFixedInputTensorsBuilder_.getMessageList();
}
}
/**
*
* EXPERIMENTAL. THIS FIELD MAY CHANGE OR GO AWAY. USE WITH CAUTION.
* Input tensors to append to every Session::Run() call.
*
*
* repeated .tensorflow.NamedTensorProto experimental_fixed_input_tensors = 778;
*/
public int getExperimentalFixedInputTensorsCount() {
// Whichever representation (plain list or field builder) is active answers.
if (experimentalFixedInputTensorsBuilder_ == null) {
return experimentalFixedInputTensors_.size();
} else {
return experimentalFixedInputTensorsBuilder_.getCount();
}
}
/**
*
* EXPERIMENTAL. THIS FIELD MAY CHANGE OR GO AWAY. USE WITH CAUTION.
* Input tensors to append to every Session::Run() call.
*
*
* repeated .tensorflow.NamedTensorProto experimental_fixed_input_tensors = 778;
*/
public org.tensorflow.framework.NamedTensorProto getExperimentalFixedInputTensors(int index) {
if (experimentalFixedInputTensorsBuilder_ == null) {
return experimentalFixedInputTensors_.get(index);
} else {
return experimentalFixedInputTensorsBuilder_.getMessage(index);
}
}
/**
*
* EXPERIMENTAL. THIS FIELD MAY CHANGE OR GO AWAY. USE WITH CAUTION.
* Input tensors to append to every Session::Run() call.
*
*
* repeated .tensorflow.NamedTensorProto experimental_fixed_input_tensors = 778;
*/
public Builder setExperimentalFixedInputTensors(
int index, org.tensorflow.framework.NamedTensorProto value) {
if (experimentalFixedInputTensorsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureExperimentalFixedInputTensorsIsMutable();
experimentalFixedInputTensors_.set(index, value);
onChanged();
} else {
experimentalFixedInputTensorsBuilder_.setMessage(index, value);
}
return this;
}
/**
*
* EXPERIMENTAL. THIS FIELD MAY CHANGE OR GO AWAY. USE WITH CAUTION.
* Input tensors to append to every Session::Run() call.
*
*
* repeated .tensorflow.NamedTensorProto experimental_fixed_input_tensors = 778;
*/
public Builder setExperimentalFixedInputTensors(
int index, org.tensorflow.framework.NamedTensorProto.Builder builderForValue) {
if (experimentalFixedInputTensorsBuilder_ == null) {
ensureExperimentalFixedInputTensorsIsMutable();
experimentalFixedInputTensors_.set(index, builderForValue.build());
onChanged();
} else {
experimentalFixedInputTensorsBuilder_.setMessage(index, builderForValue.build());
}
return this;
}
/**
*
* EXPERIMENTAL. THIS FIELD MAY CHANGE OR GO AWAY. USE WITH CAUTION.
* Input tensors to append to every Session::Run() call.
*
*
* repeated .tensorflow.NamedTensorProto experimental_fixed_input_tensors = 778;
*/
public Builder addExperimentalFixedInputTensors(org.tensorflow.framework.NamedTensorProto value) {
if (experimentalFixedInputTensorsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureExperimentalFixedInputTensorsIsMutable();
experimentalFixedInputTensors_.add(value);
onChanged();
} else {
experimentalFixedInputTensorsBuilder_.addMessage(value);
}
return this;
}
/**
*
* EXPERIMENTAL. THIS FIELD MAY CHANGE OR GO AWAY. USE WITH CAUTION.
* Input tensors to append to every Session::Run() call.
*
*
* repeated .tensorflow.NamedTensorProto experimental_fixed_input_tensors = 778;
*/
public Builder addExperimentalFixedInputTensors(
int index, org.tensorflow.framework.NamedTensorProto value) {
if (experimentalFixedInputTensorsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureExperimentalFixedInputTensorsIsMutable();
experimentalFixedInputTensors_.add(index, value);
onChanged();
} else {
experimentalFixedInputTensorsBuilder_.addMessage(index, value);
}
return this;
}
/**
*
* EXPERIMENTAL. THIS FIELD MAY CHANGE OR GO AWAY. USE WITH CAUTION.
* Input tensors to append to every Session::Run() call.
*
*
* repeated .tensorflow.NamedTensorProto experimental_fixed_input_tensors = 778;
*/
public Builder addExperimentalFixedInputTensors(
org.tensorflow.framework.NamedTensorProto.Builder builderForValue) {
if (experimentalFixedInputTensorsBuilder_ == null) {
ensureExperimentalFixedInputTensorsIsMutable();
experimentalFixedInputTensors_.add(builderForValue.build());
onChanged();
} else {
experimentalFixedInputTensorsBuilder_.addMessage(builderForValue.build());
}
return this;
}
/**
*
* EXPERIMENTAL. THIS FIELD MAY CHANGE OR GO AWAY. USE WITH CAUTION.
* Input tensors to append to every Session::Run() call.
*
*
* repeated .tensorflow.NamedTensorProto experimental_fixed_input_tensors = 778;
*/
public Builder addExperimentalFixedInputTensors(
int index, org.tensorflow.framework.NamedTensorProto.Builder builderForValue) {
if (experimentalFixedInputTensorsBuilder_ == null) {
ensureExperimentalFixedInputTensorsIsMutable();
experimentalFixedInputTensors_.add(index, builderForValue.build());
onChanged();
} else {
experimentalFixedInputTensorsBuilder_.addMessage(index, builderForValue.build());
}
return this;
}
/**
*
* EXPERIMENTAL. THIS FIELD MAY CHANGE OR GO AWAY. USE WITH CAUTION.
* Input tensors to append to every Session::Run() call.
*
*
* repeated .tensorflow.NamedTensorProto experimental_fixed_input_tensors = 778;
*/
public Builder addAllExperimentalFixedInputTensors(
java.lang.Iterable extends org.tensorflow.framework.NamedTensorProto> values) {
if (experimentalFixedInputTensorsBuilder_ == null) {
ensureExperimentalFixedInputTensorsIsMutable();
com.google.protobuf.AbstractMessageLite.Builder.addAll(
values, experimentalFixedInputTensors_);
onChanged();
} else {
experimentalFixedInputTensorsBuilder_.addAllMessages(values);
}
return this;
}
/**
*
* EXPERIMENTAL. THIS FIELD MAY CHANGE OR GO AWAY. USE WITH CAUTION.
* Input tensors to append to every Session::Run() call.
*
*
* repeated .tensorflow.NamedTensorProto experimental_fixed_input_tensors = 778;
*/
public Builder clearExperimentalFixedInputTensors() {
if (experimentalFixedInputTensorsBuilder_ == null) {
// Reset to the shared empty list and drop the "mutable copy" bit (0x40).
experimentalFixedInputTensors_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000040);
onChanged();
} else {
experimentalFixedInputTensorsBuilder_.clear();
}
return this;
}
/**
*
* EXPERIMENTAL. THIS FIELD MAY CHANGE OR GO AWAY. USE WITH CAUTION.
* Input tensors to append to every Session::Run() call.
*
*
* repeated .tensorflow.NamedTensorProto experimental_fixed_input_tensors = 778;
*/
public Builder removeExperimentalFixedInputTensors(int index) {
if (experimentalFixedInputTensorsBuilder_ == null) {
ensureExperimentalFixedInputTensorsIsMutable();
experimentalFixedInputTensors_.remove(index);
onChanged();
} else {
experimentalFixedInputTensorsBuilder_.remove(index);
}
return this;
}
/**
*
* EXPERIMENTAL. THIS FIELD MAY CHANGE OR GO AWAY. USE WITH CAUTION.
* Input tensors to append to every Session::Run() call.
*
*
* repeated .tensorflow.NamedTensorProto experimental_fixed_input_tensors = 778;
*/
public org.tensorflow.framework.NamedTensorProto.Builder getExperimentalFixedInputTensorsBuilder(
int index) {
return getExperimentalFixedInputTensorsFieldBuilder().getBuilder(index);
}
/**
*
* EXPERIMENTAL. THIS FIELD MAY CHANGE OR GO AWAY. USE WITH CAUTION.
* Input tensors to append to every Session::Run() call.
*
*
* repeated .tensorflow.NamedTensorProto experimental_fixed_input_tensors = 778;
*/
public org.tensorflow.framework.NamedTensorProtoOrBuilder getExperimentalFixedInputTensorsOrBuilder(
int index) {
if (experimentalFixedInputTensorsBuilder_ == null) {
return experimentalFixedInputTensors_.get(index); } else {
return experimentalFixedInputTensorsBuilder_.getMessageOrBuilder(index);
}
}
/**
*
* EXPERIMENTAL. THIS FIELD MAY CHANGE OR GO AWAY. USE WITH CAUTION.
* Input tensors to append to every Session::Run() call.
*
*
* repeated .tensorflow.NamedTensorProto experimental_fixed_input_tensors = 778;
*/
public java.util.List extends org.tensorflow.framework.NamedTensorProtoOrBuilder>
getExperimentalFixedInputTensorsOrBuilderList() {
if (experimentalFixedInputTensorsBuilder_ != null) {
return experimentalFixedInputTensorsBuilder_.getMessageOrBuilderList();
} else {
return java.util.Collections.unmodifiableList(experimentalFixedInputTensors_);
}
}
/**
*
* EXPERIMENTAL. THIS FIELD MAY CHANGE OR GO AWAY. USE WITH CAUTION.
* Input tensors to append to every Session::Run() call.
*
*
* repeated .tensorflow.NamedTensorProto experimental_fixed_input_tensors = 778;
*/
public org.tensorflow.framework.NamedTensorProto.Builder addExperimentalFixedInputTensorsBuilder() {
// Appends a default-initialized element and returns its builder for
// in-place editing.
return getExperimentalFixedInputTensorsFieldBuilder().addBuilder(
org.tensorflow.framework.NamedTensorProto.getDefaultInstance());
}
/**
*
* EXPERIMENTAL. THIS FIELD MAY CHANGE OR GO AWAY. USE WITH CAUTION.
* Input tensors to append to every Session::Run() call.
*
*
* repeated .tensorflow.NamedTensorProto experimental_fixed_input_tensors = 778;
*/
public org.tensorflow.framework.NamedTensorProto.Builder addExperimentalFixedInputTensorsBuilder(
int index) {
return getExperimentalFixedInputTensorsFieldBuilder().addBuilder(
index, org.tensorflow.framework.NamedTensorProto.getDefaultInstance());
}
/**
*
* EXPERIMENTAL. THIS FIELD MAY CHANGE OR GO AWAY. USE WITH CAUTION.
* Input tensors to append to every Session::Run() call.
*
*
* repeated .tensorflow.NamedTensorProto experimental_fixed_input_tensors = 778;
*/
// Fix: the return type's element type argument was lost (HTML-stripped);
// protoc emits List&lt;NamedTensorProto.Builder&gt; here.
public java.util.List<org.tensorflow.framework.NamedTensorProto.Builder>
getExperimentalFixedInputTensorsBuilderList() {
return getExperimentalFixedInputTensorsFieldBuilder().getBuilderList();
}
// Lazily creates the repeated-field builder; after creation the plain list
// reference is nulled out because the builder owns the elements.
private com.google.protobuf.RepeatedFieldBuilderV3<
org.tensorflow.framework.NamedTensorProto, org.tensorflow.framework.NamedTensorProto.Builder, org.tensorflow.framework.NamedTensorProtoOrBuilder>
getExperimentalFixedInputTensorsFieldBuilder() {
if (experimentalFixedInputTensorsBuilder_ == null) {
experimentalFixedInputTensorsBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3<
org.tensorflow.framework.NamedTensorProto, org.tensorflow.framework.NamedTensorProto.Builder, org.tensorflow.framework.NamedTensorProtoOrBuilder>(
experimentalFixedInputTensors_,
((bitField0_ & 0x00000040) == 0x00000040),
getParentForChildren(),
isClean());
experimentalFixedInputTensors_ = null;
}
return experimentalFixedInputTensorsBuilder_;
}
// Scalar bool field 779; proto3 scalars carry no presence bit, so false is
// the unset default.
private boolean enableModelWarmup_ ;
/**
*
* Enables model warmup.
*
*
* bool enable_model_warmup = 779;
*/
public boolean getEnableModelWarmup() {
return enableModelWarmup_;
}
/**
*
* Enables model warmup.
*
*
* bool enable_model_warmup = 779;
*/
public Builder setEnableModelWarmup(boolean value) {
enableModelWarmup_ = value;
onChanged();
return this;
}
/**
*
* Enables model warmup.
*
*
* bool enable_model_warmup = 779;
*/
public Builder clearEnableModelWarmup() {
enableModelWarmup_ = false;
onChanged();
return this;
}
// proto3 builders route unknown-field replacement through the proto3-specific
// superclass hook; merging uses the shared implementation unchanged.
@java.lang.Override
public final Builder setUnknownFields(
final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFieldsProto3(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:tensorflow.serving.SessionBundleConfig)
}
// @@protoc_insertion_point(class_scope:tensorflow.serving.SessionBundleConfig)
// Singleton default instance shared by getDefaultInstance()/ -ForType();
// created eagerly in the static initializer.
private static final tensorflow.serving.SessionBundleConfigOuterClass.SessionBundleConfig DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new tensorflow.serving.SessionBundleConfigOuterClass.SessionBundleConfig();
}
public static tensorflow.serving.SessionBundleConfigOuterClass.SessionBundleConfig getDefaultInstance() {
return DEFAULT_INSTANCE;
}
// Fix: the SessionBundleConfig type argument on Parser/AbstractParser was
// lost (HTML-stripped); without it these declarations are raw types and
// parser()/getParserForType() would not satisfy the generic Message contract.
private static final com.google.protobuf.Parser<SessionBundleConfig>
PARSER = new com.google.protobuf.AbstractParser<SessionBundleConfig>() {
@java.lang.Override
public SessionBundleConfig parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
// Delegates to the wire-parsing constructor.
return new SessionBundleConfig(input, extensionRegistry);
}
};
public static com.google.protobuf.Parser<SessionBundleConfig> parser() {
return PARSER;
}
@java.lang.Override
public com.google.protobuf.Parser<SessionBundleConfig> getParserForType() {
return PARSER;
}
@java.lang.Override
public tensorflow.serving.SessionBundleConfigOuterClass.SessionBundleConfig getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
public interface BatchingParametersOrBuilder extends
// @@protoc_insertion_point(interface_extends:tensorflow.serving.BatchingParameters)
com.google.protobuf.MessageOrBuilder {
/**
*
* The maximum size of each batch.
* IMPORTANT: As discussed above, use 'max_batch_size * 2' client threads to
* achieve high throughput with batching.
*
*
* .google.protobuf.Int64Value max_batch_size = 1;
*/
boolean hasMaxBatchSize();
/**
*
* The maximum size of each batch.
* IMPORTANT: As discussed above, use 'max_batch_size * 2' client threads to
* achieve high throughput with batching.
*
*
* .google.protobuf.Int64Value max_batch_size = 1;
*/
com.google.protobuf.Int64Value getMaxBatchSize();
/**
*
* The maximum size of each batch.
* IMPORTANT: As discussed above, use 'max_batch_size * 2' client threads to
* achieve high throughput with batching.
*
*
* .google.protobuf.Int64Value max_batch_size = 1;
*/
com.google.protobuf.Int64ValueOrBuilder getMaxBatchSizeOrBuilder();
/**
*
* If a task has been enqueued for this amount of time (in microseconds), and
* a thread is available, the scheduler will immediately form a batch from
* enqueued tasks and assign the batch to the thread for processing, even if
* the batch's size is below 'max_batch_size'.
*
*
* .google.protobuf.Int64Value batch_timeout_micros = 2;
*/
boolean hasBatchTimeoutMicros();
/**
*
* If a task has been enqueued for this amount of time (in microseconds), and
* a thread is available, the scheduler will immediately form a batch from
* enqueued tasks and assign the batch to the thread for processing, even if
* the batch's size is below 'max_batch_size'.
*
*
* .google.protobuf.Int64Value batch_timeout_micros = 2;
*/
com.google.protobuf.Int64Value getBatchTimeoutMicros();
/**
*
* If a task has been enqueued for this amount of time (in microseconds), and
* a thread is available, the scheduler will immediately form a batch from
* enqueued tasks and assign the batch to the thread for processing, even if
* the batch's size is below 'max_batch_size'.
*
*
* .google.protobuf.Int64Value batch_timeout_micros = 2;
*/
com.google.protobuf.Int64ValueOrBuilder getBatchTimeoutMicrosOrBuilder();
/**
*
* The maximum length of the queue, in terms of the number of batches. (A
* batch that has been scheduled on a thread is considered to have been
* removed from the queue.)
*
*
* .google.protobuf.Int64Value max_enqueued_batches = 3;
*/
boolean hasMaxEnqueuedBatches();
/**
*
* The maximum length of the queue, in terms of the number of batches. (A
* batch that has been scheduled on a thread is considered to have been
* removed from the queue.)
*
*
* .google.protobuf.Int64Value max_enqueued_batches = 3;
*/
com.google.protobuf.Int64Value getMaxEnqueuedBatches();
/**
*
* The maximum length of the queue, in terms of the number of batches. (A
* batch that has been scheduled on a thread is considered to have been
* removed from the queue.)
*
*
* .google.protobuf.Int64Value max_enqueued_batches = 3;
*/
com.google.protobuf.Int64ValueOrBuilder getMaxEnqueuedBatchesOrBuilder();
/**
*
* The number of threads to use to process batches.
* Must be >= 1, and should be tuned carefully.
*
*
* .google.protobuf.Int64Value num_batch_threads = 4;
*/
boolean hasNumBatchThreads();
/**
*
* The number of threads to use to process batches.
* Must be >= 1, and should be tuned carefully.
*
*
* .google.protobuf.Int64Value num_batch_threads = 4;
*/
com.google.protobuf.Int64Value getNumBatchThreads();
/**
*
* The number of threads to use to process batches.
* Must be >= 1, and should be tuned carefully.
*
*
* .google.protobuf.Int64Value num_batch_threads = 4;
*/
com.google.protobuf.Int64ValueOrBuilder getNumBatchThreadsOrBuilder();
/**
*
* The name to use for the pool of batch threads.
*
*
* .google.protobuf.StringValue thread_pool_name = 5;
*/
boolean hasThreadPoolName();
/**
*
* The name to use for the pool of batch threads.
*
*
* .google.protobuf.StringValue thread_pool_name = 5;
*/
com.google.protobuf.StringValue getThreadPoolName();
/**
*
* The name to use for the pool of batch threads.
*
*
* .google.protobuf.StringValue thread_pool_name = 5;
*/
com.google.protobuf.StringValueOrBuilder getThreadPoolNameOrBuilder();
/**
*
* The allowed batch sizes. (Ignored if left empty.)
* Requirements:
* - The entries must be in increasing order.
* - The final entry must equal 'max_batch_size'.
*
*
* repeated int64 allowed_batch_sizes = 6;
*/
// Fix: the element type argument of List was lost (HTML-stripped); protoc
// emits List&lt;java.lang.Long&gt; for a repeated int64 field.
java.util.List<java.lang.Long> getAllowedBatchSizesList();
/**
*
* The allowed batch sizes. (Ignored if left empty.)
* Requirements:
* - The entries must be in increasing order.
* - The final entry must equal 'max_batch_size'.
*
*
* repeated int64 allowed_batch_sizes = 6;
*/
int getAllowedBatchSizesCount();
/**
*
* The allowed batch sizes. (Ignored if left empty.)
* Requirements:
* - The entries must be in increasing order.
* - The final entry must equal 'max_batch_size'.
*
*
* repeated int64 allowed_batch_sizes = 6;
*/
long getAllowedBatchSizes(int index);
/**
*
* Whether to pad variable-length inputs when a batch is formed.
*
*
* bool pad_variable_length_inputs = 7;
*/
boolean getPadVariableLengthInputs();
}
/**
*
* Batching parameters. Each individual parameter is optional. If omitted, the
* default value from the relevant batching config struct (SharedBatchScheduler
* ::Options or BatchSchedulerRetrier::Options) is used.
*
*
* Protobuf type {@code tensorflow.serving.BatchingParameters}
*/
public static final class BatchingParameters extends
com.google.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:tensorflow.serving.BatchingParameters)
BatchingParametersOrBuilder {
private static final long serialVersionUID = 0L;
// Use BatchingParameters.newBuilder() to construct.
// Fix: the bounded wildcard on the builder parameter was lost
// (HTML-stripped); protoc emits GeneratedMessageV3.Builder<?> here.
private BatchingParameters(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
// Default constructor: initializes repeated/scalar fields to their proto3
// defaults (empty list, false).
private BatchingParameters() {
allowedBatchSizes_ = java.util.Collections.emptyList();
padVariableLengthInputs_ = false;
}
// Exposes fields that were on the wire but not in this message's schema.
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
// Wire-parsing constructor: reads tag/value pairs until EOF (tag 0) or an
// unparseable field, merging repeated message occurrences per proto3 rules.
// Fix applied below: both ArrayList allocations for allowed_batch_sizes lost
// their <java.lang.Long> type argument to HTML stripping; restored.
private BatchingParameters(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 10: {
// max_batch_size: merge into any previously parsed value.
com.google.protobuf.Int64Value.Builder subBuilder = null;
if (maxBatchSize_ != null) {
subBuilder = maxBatchSize_.toBuilder();
}
maxBatchSize_ = input.readMessage(com.google.protobuf.Int64Value.parser(), extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(maxBatchSize_);
maxBatchSize_ = subBuilder.buildPartial();
}
break;
}
case 18: {
com.google.protobuf.Int64Value.Builder subBuilder = null;
if (batchTimeoutMicros_ != null) {
subBuilder = batchTimeoutMicros_.toBuilder();
}
batchTimeoutMicros_ = input.readMessage(com.google.protobuf.Int64Value.parser(), extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(batchTimeoutMicros_);
batchTimeoutMicros_ = subBuilder.buildPartial();
}
break;
}
case 26: {
com.google.protobuf.Int64Value.Builder subBuilder = null;
if (maxEnqueuedBatches_ != null) {
subBuilder = maxEnqueuedBatches_.toBuilder();
}
maxEnqueuedBatches_ = input.readMessage(com.google.protobuf.Int64Value.parser(), extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(maxEnqueuedBatches_);
maxEnqueuedBatches_ = subBuilder.buildPartial();
}
break;
}
case 34: {
com.google.protobuf.Int64Value.Builder subBuilder = null;
if (numBatchThreads_ != null) {
subBuilder = numBatchThreads_.toBuilder();
}
numBatchThreads_ = input.readMessage(com.google.protobuf.Int64Value.parser(), extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(numBatchThreads_);
numBatchThreads_ = subBuilder.buildPartial();
}
break;
}
case 42: {
com.google.protobuf.StringValue.Builder subBuilder = null;
if (threadPoolName_ != null) {
subBuilder = threadPoolName_.toBuilder();
}
threadPoolName_ = input.readMessage(com.google.protobuf.StringValue.parser(), extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(threadPoolName_);
threadPoolName_ = subBuilder.buildPartial();
}
break;
}
case 48: {
// allowed_batch_sizes, unpacked encoding: one varint per tag.
if (!((mutable_bitField0_ & 0x00000020) == 0x00000020)) {
allowedBatchSizes_ = new java.util.ArrayList<java.lang.Long>();
mutable_bitField0_ |= 0x00000020;
}
allowedBatchSizes_.add(input.readInt64());
break;
}
case 50: {
// allowed_batch_sizes, packed encoding: length-delimited run of varints.
int length = input.readRawVarint32();
int limit = input.pushLimit(length);
if (!((mutable_bitField0_ & 0x00000020) == 0x00000020) && input.getBytesUntilLimit() > 0) {
allowedBatchSizes_ = new java.util.ArrayList<java.lang.Long>();
mutable_bitField0_ |= 0x00000020;
}
while (input.getBytesUntilLimit() > 0) {
allowedBatchSizes_.add(input.readInt64());
}
input.popLimit(limit);
break;
}
case 56: {
padVariableLengthInputs_ = input.readBool();
break;
}
default: {
if (!parseUnknownFieldProto3(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
// Freeze the repeated field if it was populated, then seal unknown fields.
if (((mutable_bitField0_ & 0x00000020) == 0x00000020)) {
allowedBatchSizes_ = java.util.Collections.unmodifiableList(allowedBatchSizes_);
}
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
// Reflection support: returns the proto descriptor for BatchingParameters.
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return tensorflow.serving.SessionBundleConfigOuterClass.internal_static_tensorflow_serving_BatchingParameters_descriptor;
}
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
// Binds the descriptor's fields to this generated class and its Builder
// so generic field access (getField/setField) works.
return tensorflow.serving.SessionBundleConfigOuterClass.internal_static_tensorflow_serving_BatchingParameters_fieldAccessorTable
.ensureFieldAccessorsInitialized(
tensorflow.serving.SessionBundleConfigOuterClass.BatchingParameters.class, tensorflow.serving.SessionBundleConfigOuterClass.BatchingParameters.Builder.class);
}
// Bit set used during parsing/building to track repeated-field mutability
// (bit 0x20 = allowedBatchSizes_ list is mutable). Not per-field presence:
// proto3 wrapper fields signal presence via null/non-null instead.
private int bitField0_;
public static final int MAX_BATCH_SIZE_FIELD_NUMBER = 1;
// null means "unset"; wrapper message gives int64 optional-like semantics.
private com.google.protobuf.Int64Value maxBatchSize_;
/**
*
* The maximum size of each batch.
* IMPORTANT: As discussed above, use 'max_batch_size * 2' client threads to
* achieve high throughput with batching.
*
*
* .google.protobuf.Int64Value max_batch_size = 1;
*/
public boolean hasMaxBatchSize() {
return maxBatchSize_ != null;
}
/**
*
* The maximum size of each batch.
* IMPORTANT: As discussed above, use 'max_batch_size * 2' client threads to
* achieve high throughput with batching.
*
*
* .google.protobuf.Int64Value max_batch_size = 1;
*/
public com.google.protobuf.Int64Value getMaxBatchSize() {
// Never returns null: falls back to the default instance when unset.
return maxBatchSize_ == null ? com.google.protobuf.Int64Value.getDefaultInstance() : maxBatchSize_;
}
/**
*
* The maximum size of each batch.
* IMPORTANT: As discussed above, use 'max_batch_size * 2' client threads to
* achieve high throughput with batching.
*
*
* .google.protobuf.Int64Value max_batch_size = 1;
*/
public com.google.protobuf.Int64ValueOrBuilder getMaxBatchSizeOrBuilder() {
return getMaxBatchSize();
}
public static final int BATCH_TIMEOUT_MICROS_FIELD_NUMBER = 2;
// null means "unset" (see hasBatchTimeoutMicros()).
private com.google.protobuf.Int64Value batchTimeoutMicros_;
/**
*
* If a task has been enqueued for this amount of time (in microseconds), and
* a thread is available, the scheduler will immediately form a batch from
* enqueued tasks and assign the batch to the thread for processing, even if
* the batch's size is below 'max_batch_size'.
*
*
* .google.protobuf.Int64Value batch_timeout_micros = 2;
*/
public boolean hasBatchTimeoutMicros() {
return batchTimeoutMicros_ != null;
}
/**
*
* If a task has been enqueued for this amount of time (in microseconds), and
* a thread is available, the scheduler will immediately form a batch from
* enqueued tasks and assign the batch to the thread for processing, even if
* the batch's size is below 'max_batch_size'.
*
*
* .google.protobuf.Int64Value batch_timeout_micros = 2;
*/
public com.google.protobuf.Int64Value getBatchTimeoutMicros() {
// Never returns null: falls back to the default instance when unset.
return batchTimeoutMicros_ == null ? com.google.protobuf.Int64Value.getDefaultInstance() : batchTimeoutMicros_;
}
/**
*
* If a task has been enqueued for this amount of time (in microseconds), and
* a thread is available, the scheduler will immediately form a batch from
* enqueued tasks and assign the batch to the thread for processing, even if
* the batch's size is below 'max_batch_size'.
*
*
* .google.protobuf.Int64Value batch_timeout_micros = 2;
*/
public com.google.protobuf.Int64ValueOrBuilder getBatchTimeoutMicrosOrBuilder() {
return getBatchTimeoutMicros();
}
public static final int MAX_ENQUEUED_BATCHES_FIELD_NUMBER = 3;
// null means "unset" (see hasMaxEnqueuedBatches()).
private com.google.protobuf.Int64Value maxEnqueuedBatches_;
/**
*
* The maximum length of the queue, in terms of the number of batches. (A
* batch that has been scheduled on a thread is considered to have been
* removed from the queue.)
*
*
* .google.protobuf.Int64Value max_enqueued_batches = 3;
*/
public boolean hasMaxEnqueuedBatches() {
return maxEnqueuedBatches_ != null;
}
/**
*
* The maximum length of the queue, in terms of the number of batches. (A
* batch that has been scheduled on a thread is considered to have been
* removed from the queue.)
*
*
* .google.protobuf.Int64Value max_enqueued_batches = 3;
*/
public com.google.protobuf.Int64Value getMaxEnqueuedBatches() {
// Never returns null: falls back to the default instance when unset.
return maxEnqueuedBatches_ == null ? com.google.protobuf.Int64Value.getDefaultInstance() : maxEnqueuedBatches_;
}
/**
*
* The maximum length of the queue, in terms of the number of batches. (A
* batch that has been scheduled on a thread is considered to have been
* removed from the queue.)
*
*
* .google.protobuf.Int64Value max_enqueued_batches = 3;
*/
public com.google.protobuf.Int64ValueOrBuilder getMaxEnqueuedBatchesOrBuilder() {
return getMaxEnqueuedBatches();
}
public static final int NUM_BATCH_THREADS_FIELD_NUMBER = 4;
// null means "unset" (see hasNumBatchThreads()).
private com.google.protobuf.Int64Value numBatchThreads_;
/**
*
* The number of threads to use to process batches.
* Must be >= 1, and should be tuned carefully.
*
*
* .google.protobuf.Int64Value num_batch_threads = 4;
*/
public boolean hasNumBatchThreads() {
return numBatchThreads_ != null;
}
/**
*
* The number of threads to use to process batches.
* Must be >= 1, and should be tuned carefully.
*
*
* .google.protobuf.Int64Value num_batch_threads = 4;
*/
public com.google.protobuf.Int64Value getNumBatchThreads() {
// Never returns null: falls back to the default instance when unset.
return numBatchThreads_ == null ? com.google.protobuf.Int64Value.getDefaultInstance() : numBatchThreads_;
}
/**
*
* The number of threads to use to process batches.
* Must be >= 1, and should be tuned carefully.
*
*
* .google.protobuf.Int64Value num_batch_threads = 4;
*/
public com.google.protobuf.Int64ValueOrBuilder getNumBatchThreadsOrBuilder() {
return getNumBatchThreads();
}
public static final int THREAD_POOL_NAME_FIELD_NUMBER = 5;
// null means "unset" (see hasThreadPoolName()).
private com.google.protobuf.StringValue threadPoolName_;
/**
*
* The name to use for the pool of batch threads.
*
*
* .google.protobuf.StringValue thread_pool_name = 5;
*/
public boolean hasThreadPoolName() {
return threadPoolName_ != null;
}
/**
*
* The name to use for the pool of batch threads.
*
*
* .google.protobuf.StringValue thread_pool_name = 5;
*/
public com.google.protobuf.StringValue getThreadPoolName() {
// Never returns null: falls back to the default instance when unset.
return threadPoolName_ == null ? com.google.protobuf.StringValue.getDefaultInstance() : threadPoolName_;
}
/**
*
* The name to use for the pool of batch threads.
*
*
* .google.protobuf.StringValue thread_pool_name = 5;
*/
public com.google.protobuf.StringValueOrBuilder getThreadPoolNameOrBuilder() {
return getThreadPoolName();
}
public static final int ALLOWED_BATCH_SIZES_FIELD_NUMBER = 6;
// Fix: this list must be parameterized as List<java.lang.Long>. With the raw
// List, get(index) returns Object, so getAllowedBatchSizes(int) could not
// return it as long (compile error). The parameterized type also lets the
// auto-unboxing in getAllowedBatchSizes(int) work as intended.
private java.util.List<java.lang.Long> allowedBatchSizes_;
/**
*
* The allowed batch sizes. (Ignored if left empty.)
* Requirements:
* - The entries must be in increasing order.
* - The final entry must equal 'max_batch_size'.
*
*
* repeated int64 allowed_batch_sizes = 6;
*
* @return the list of allowed batch sizes (unmodifiable on a built message)
*/
public java.util.List<java.lang.Long>
getAllowedBatchSizesList() {
return allowedBatchSizes_;
}
/**
*
* The allowed batch sizes. (Ignored if left empty.)
*
* repeated int64 allowed_batch_sizes = 6;
*
* @return the number of entries in allowed_batch_sizes
*/
public int getAllowedBatchSizesCount() {
return allowedBatchSizes_.size();
}
/**
*
* The allowed batch sizes. (Ignored if left empty.)
*
* repeated int64 allowed_batch_sizes = 6;
*
* @param index zero-based index into the repeated field
* @return the allowed batch size at {@code index}
* @throws IndexOutOfBoundsException if {@code index} is out of range
*/
public long getAllowedBatchSizes(int index) {
return allowedBatchSizes_.get(index);
}
// Byte size of the packed allowed_batch_sizes payload. Computed (and cached)
// by getSerializedSize() and then consumed by writeTo(), which is why
// writeTo() calls getSerializedSize() first.
private int allowedBatchSizesMemoizedSerializedSize = -1;
public static final int PAD_VARIABLE_LENGTH_INPUTS_FIELD_NUMBER = 7;
// Plain proto3 bool: default false, no presence tracking.
private boolean padVariableLengthInputs_;
/**
*
* Whether to pad variable-length inputs when a batch is formed.
*
*
* bool pad_variable_length_inputs = 7;
*/
public boolean getPadVariableLengthInputs() {
return padVariableLengthInputs_;
}
// -1 = not yet computed, 0 = not initialized, 1 = initialized.
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
// No required fields in this proto3 message, so always initialized.
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
// Side effect: primes allowedBatchSizesMemoizedSerializedSize, which the
// packed repeated field below depends on.
getSerializedSize();
// Message fields are only emitted when set (non-null).
if (maxBatchSize_ != null) {
output.writeMessage(1, getMaxBatchSize());
}
if (batchTimeoutMicros_ != null) {
output.writeMessage(2, getBatchTimeoutMicros());
}
if (maxEnqueuedBatches_ != null) {
output.writeMessage(3, getMaxEnqueuedBatches());
}
if (numBatchThreads_ != null) {
output.writeMessage(4, getNumBatchThreads());
}
if (threadPoolName_ != null) {
output.writeMessage(5, getThreadPoolName());
}
// allowed_batch_sizes is written packed: tag 50 = field 6, wire type 2
// (length-delimited), followed by the payload length and raw varints.
if (getAllowedBatchSizesList().size() > 0) {
output.writeUInt32NoTag(50);
output.writeUInt32NoTag(allowedBatchSizesMemoizedSerializedSize);
}
for (int i = 0; i < allowedBatchSizes_.size(); i++) {
output.writeInt64NoTag(allowedBatchSizes_.get(i));
}
// proto3: bool is only serialized when non-default.
if (padVariableLengthInputs_ != false) {
output.writeBool(7, padVariableLengthInputs_);
}
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
// Memoized: the message is immutable, so the size is computed only once.
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (maxBatchSize_ != null) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, getMaxBatchSize());
}
if (batchTimeoutMicros_ != null) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(2, getBatchTimeoutMicros());
}
if (maxEnqueuedBatches_ != null) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(3, getMaxEnqueuedBatches());
}
if (numBatchThreads_ != null) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(4, getNumBatchThreads());
}
if (threadPoolName_ != null) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(5, getThreadPoolName());
}
{
// Packed repeated int64: payload bytes plus (when non-empty) one tag
// byte and the varint-encoded payload length.
int dataSize = 0;
for (int i = 0; i < allowedBatchSizes_.size(); i++) {
dataSize += com.google.protobuf.CodedOutputStream
.computeInt64SizeNoTag(allowedBatchSizes_.get(i));
}
size += dataSize;
if (!getAllowedBatchSizesList().isEmpty()) {
size += 1;
size += com.google.protobuf.CodedOutputStream
.computeInt32SizeNoTag(dataSize);
}
// Cache the payload size for writeTo() to emit as the length prefix.
allowedBatchSizesMemoizedSerializedSize = dataSize;
}
if (padVariableLengthInputs_ != false) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(7, padVariableLengthInputs_);
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
  // Structural equality: same presence and value for every field, plus
  // identical unknown fields.
  if (obj == this) {
    return true;
  }
  if (!(obj instanceof tensorflow.serving.SessionBundleConfigOuterClass.BatchingParameters)) {
    return super.equals(obj);
  }
  tensorflow.serving.SessionBundleConfigOuterClass.BatchingParameters other =
      (tensorflow.serving.SessionBundleConfigOuterClass.BatchingParameters) obj;
  // Wrapper-message fields: presence must match, and when present the
  // wrapped values must match.
  if (hasMaxBatchSize() != other.hasMaxBatchSize()) {
    return false;
  }
  if (hasMaxBatchSize() && !getMaxBatchSize().equals(other.getMaxBatchSize())) {
    return false;
  }
  if (hasBatchTimeoutMicros() != other.hasBatchTimeoutMicros()) {
    return false;
  }
  if (hasBatchTimeoutMicros()
      && !getBatchTimeoutMicros().equals(other.getBatchTimeoutMicros())) {
    return false;
  }
  if (hasMaxEnqueuedBatches() != other.hasMaxEnqueuedBatches()) {
    return false;
  }
  if (hasMaxEnqueuedBatches()
      && !getMaxEnqueuedBatches().equals(other.getMaxEnqueuedBatches())) {
    return false;
  }
  if (hasNumBatchThreads() != other.hasNumBatchThreads()) {
    return false;
  }
  if (hasNumBatchThreads() && !getNumBatchThreads().equals(other.getNumBatchThreads())) {
    return false;
  }
  if (hasThreadPoolName() != other.hasThreadPoolName()) {
    return false;
  }
  if (hasThreadPoolName() && !getThreadPoolName().equals(other.getThreadPoolName())) {
    return false;
  }
  // Repeated and scalar fields compare by value directly.
  if (!getAllowedBatchSizesList().equals(other.getAllowedBatchSizesList())) {
    return false;
  }
  if (getPadVariableLengthInputs() != other.getPadVariableLengthInputs()) {
    return false;
  }
  return unknownFields.equals(other.unknownFields);
}
@java.lang.Override
public int hashCode() {
// Memoized: the message is immutable, so the hash is computed only once.
// (0 doubles as the "not computed" sentinel, as in all generated messages.)
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
// Standard generated mixing: seed with the descriptor, then fold in each
// set field as (37*h + fieldNumber) followed by (53*h + valueHash).
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (hasMaxBatchSize()) {
hash = (37 * hash) + MAX_BATCH_SIZE_FIELD_NUMBER;
hash = (53 * hash) + getMaxBatchSize().hashCode();
}
if (hasBatchTimeoutMicros()) {
hash = (37 * hash) + BATCH_TIMEOUT_MICROS_FIELD_NUMBER;
hash = (53 * hash) + getBatchTimeoutMicros().hashCode();
}
if (hasMaxEnqueuedBatches()) {
hash = (37 * hash) + MAX_ENQUEUED_BATCHES_FIELD_NUMBER;
hash = (53 * hash) + getMaxEnqueuedBatches().hashCode();
}
if (hasNumBatchThreads()) {
hash = (37 * hash) + NUM_BATCH_THREADS_FIELD_NUMBER;
hash = (53 * hash) + getNumBatchThreads().hashCode();
}
if (hasThreadPoolName()) {
hash = (37 * hash) + THREAD_POOL_NAME_FIELD_NUMBER;
hash = (53 * hash) + getThreadPoolName().hashCode();
}
if (getAllowedBatchSizesCount() > 0) {
hash = (37 * hash) + ALLOWED_BATCH_SIZES_FIELD_NUMBER;
hash = (53 * hash) + getAllowedBatchSizesList().hashCode();
}
// bool is always mixed in (even when default) to stay consistent with
// other generated proto3 messages.
hash = (37 * hash) + PAD_VARIABLE_LENGTH_INPUTS_FIELD_NUMBER;
hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(
getPadVariableLengthInputs());
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
// Static parse entry points. All overloads delegate to PARSER (or to the
// GeneratedMessageV3 I/O helpers, which wrap IOExceptions appropriately).
public static tensorflow.serving.SessionBundleConfigOuterClass.BatchingParameters parseFrom(
java.nio.ByteBuffer data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static tensorflow.serving.SessionBundleConfigOuterClass.BatchingParameters parseFrom(
java.nio.ByteBuffer data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static tensorflow.serving.SessionBundleConfigOuterClass.BatchingParameters parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static tensorflow.serving.SessionBundleConfigOuterClass.BatchingParameters parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static tensorflow.serving.SessionBundleConfigOuterClass.BatchingParameters parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static tensorflow.serving.SessionBundleConfigOuterClass.BatchingParameters parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static tensorflow.serving.SessionBundleConfigOuterClass.BatchingParameters parseFrom(java.io.InputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static tensorflow.serving.SessionBundleConfigOuterClass.BatchingParameters parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
// Delimited variants read a varint length prefix before the message bytes.
public static tensorflow.serving.SessionBundleConfigOuterClass.BatchingParameters parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static tensorflow.serving.SessionBundleConfigOuterClass.BatchingParameters parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static tensorflow.serving.SessionBundleConfigOuterClass.BatchingParameters parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static tensorflow.serving.SessionBundleConfigOuterClass.BatchingParameters parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
// Creates a builder with all fields at their defaults.
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
// Creates a builder pre-populated from an existing message.
public static Builder newBuilder(tensorflow.serving.SessionBundleConfigOuterClass.BatchingParameters prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
// Skip the mergeFrom copy when this is the default instance.
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
*
* Batching parameters. Each individual parameter is optional. If omitted, the
* default value from the relevant batching config struct (SharedBatchScheduler
* ::Options or BatchSchedulerRetrier::Options) is used.
*
*
* Protobuf type {@code tensorflow.serving.BatchingParameters}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessageV3.Builder implements
// @@protoc_insertion_point(builder_implements:tensorflow.serving.BatchingParameters)
tensorflow.serving.SessionBundleConfigOuterClass.BatchingParametersOrBuilder {
// Same descriptor/accessor-table plumbing as the message class itself.
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return tensorflow.serving.SessionBundleConfigOuterClass.internal_static_tensorflow_serving_BatchingParameters_descriptor;
}
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return tensorflow.serving.SessionBundleConfigOuterClass.internal_static_tensorflow_serving_BatchingParameters_fieldAccessorTable
.ensureFieldAccessorsInitialized(
tensorflow.serving.SessionBundleConfigOuterClass.BatchingParameters.class, tensorflow.serving.SessionBundleConfigOuterClass.BatchingParameters.Builder.class);
}
// Construct using tensorflow.serving.SessionBundleConfigOuterClass.BatchingParameters.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
// Parent-aware constructor used for nested-builder change notification.
private Builder(
com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
// No repeated-message fields here, so nothing needs eager field builders.
if (com.google.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
}
}
@java.lang.Override
public Builder clear() {
// Resets every field to its default; nested builders are discarded too.
super.clear();
if (maxBatchSizeBuilder_ == null) {
maxBatchSize_ = null;
} else {
maxBatchSize_ = null;
maxBatchSizeBuilder_ = null;
}
if (batchTimeoutMicrosBuilder_ == null) {
batchTimeoutMicros_ = null;
} else {
batchTimeoutMicros_ = null;
batchTimeoutMicrosBuilder_ = null;
}
if (maxEnqueuedBatchesBuilder_ == null) {
maxEnqueuedBatches_ = null;
} else {
maxEnqueuedBatches_ = null;
maxEnqueuedBatchesBuilder_ = null;
}
if (numBatchThreadsBuilder_ == null) {
numBatchThreads_ = null;
} else {
numBatchThreads_ = null;
numBatchThreadsBuilder_ = null;
}
if (threadPoolNameBuilder_ == null) {
threadPoolName_ = null;
} else {
threadPoolName_ = null;
threadPoolNameBuilder_ = null;
}
// Drop the repeated field and clear its "mutable list" bit (0x20).
allowedBatchSizes_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000020);
padVariableLengthInputs_ = false;
return this;
}
@java.lang.Override
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return tensorflow.serving.SessionBundleConfigOuterClass.internal_static_tensorflow_serving_BatchingParameters_descriptor;
}
@java.lang.Override
public tensorflow.serving.SessionBundleConfigOuterClass.BatchingParameters getDefaultInstanceForType() {
return tensorflow.serving.SessionBundleConfigOuterClass.BatchingParameters.getDefaultInstance();
}
@java.lang.Override
public tensorflow.serving.SessionBundleConfigOuterClass.BatchingParameters build() {
// Like buildPartial(), but rejects uninitialized messages (a no-op here,
// since proto3 messages have no required fields).
tensorflow.serving.SessionBundleConfigOuterClass.BatchingParameters result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public tensorflow.serving.SessionBundleConfigOuterClass.BatchingParameters buildPartial() {
  // Builds a message from the builder's current state without checking
  // initialization (proto3: no required fields).
  tensorflow.serving.SessionBundleConfigOuterClass.BatchingParameters result =
      new tensorflow.serving.SessionBundleConfigOuterClass.BatchingParameters(this);
  // For each singular message field, prefer the nested field builder when
  // one exists; otherwise copy the field reference directly.
  if (maxBatchSizeBuilder_ == null) {
    result.maxBatchSize_ = maxBatchSize_;
  } else {
    result.maxBatchSize_ = maxBatchSizeBuilder_.build();
  }
  if (batchTimeoutMicrosBuilder_ == null) {
    result.batchTimeoutMicros_ = batchTimeoutMicros_;
  } else {
    result.batchTimeoutMicros_ = batchTimeoutMicrosBuilder_.build();
  }
  if (maxEnqueuedBatchesBuilder_ == null) {
    result.maxEnqueuedBatches_ = maxEnqueuedBatches_;
  } else {
    result.maxEnqueuedBatches_ = maxEnqueuedBatchesBuilder_.build();
  }
  if (numBatchThreadsBuilder_ == null) {
    result.numBatchThreads_ = numBatchThreads_;
  } else {
    result.numBatchThreads_ = numBatchThreadsBuilder_.build();
  }
  if (threadPoolNameBuilder_ == null) {
    result.threadPoolName_ = threadPoolName_;
  } else {
    result.threadPoolName_ = threadPoolNameBuilder_.build();
  }
  // Freeze the repeated field so the built message is immutable, and clear
  // the mutability bit so a later mutation on this builder re-copies it.
  if (((bitField0_ & 0x00000020) == 0x00000020)) {
    allowedBatchSizes_ = java.util.Collections.unmodifiableList(allowedBatchSizes_);
    bitField0_ = (bitField0_ & ~0x00000020);
  }
  result.allowedBatchSizes_ = allowedBatchSizes_;
  result.padVariableLengthInputs_ = padVariableLengthInputs_;
  // Cleanup: the original declared `from_bitField0_` (never read) and
  // threaded a constant 0 through `to_bitField0_`. The message tracks no
  // per-field presence bits in proto3, so its bitField0_ is simply 0.
  result.bitField0_ = 0;
  onBuilt();
  return result;
}
// Covariant-return overrides of the generic reflective mutators; all simply
// delegate to the GeneratedMessageV3.Builder implementations.
@java.lang.Override
public Builder clone() {
return (Builder) super.clone();
}
@java.lang.Override
public Builder setField(
com.google.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return (Builder) super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
com.google.protobuf.Descriptors.FieldDescriptor field) {
return (Builder) super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
com.google.protobuf.Descriptors.OneofDescriptor oneof) {
return (Builder) super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
com.google.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return (Builder) super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
com.google.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return (Builder) super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(com.google.protobuf.Message other) {
// Fast path for same-type messages; otherwise fall back to the slower
// reflective merge in the superclass.
if (other instanceof tensorflow.serving.SessionBundleConfigOuterClass.BatchingParameters) {
return mergeFrom((tensorflow.serving.SessionBundleConfigOuterClass.BatchingParameters)other);
} else {
super.mergeFrom(other);
return this;
}
}
// Field-wise merge: set singular fields from `other` when present there;
// repeated allowed_batch_sizes entries are appended, not replaced.
public Builder mergeFrom(tensorflow.serving.SessionBundleConfigOuterClass.BatchingParameters other) {
if (other == tensorflow.serving.SessionBundleConfigOuterClass.BatchingParameters.getDefaultInstance()) return this;
if (other.hasMaxBatchSize()) {
mergeMaxBatchSize(other.getMaxBatchSize());
}
if (other.hasBatchTimeoutMicros()) {
mergeBatchTimeoutMicros(other.getBatchTimeoutMicros());
}
if (other.hasMaxEnqueuedBatches()) {
mergeMaxEnqueuedBatches(other.getMaxEnqueuedBatches());
}
if (other.hasNumBatchThreads()) {
mergeNumBatchThreads(other.getNumBatchThreads());
}
if (other.hasThreadPoolName()) {
mergeThreadPoolName(other.getThreadPoolName());
}
if (!other.allowedBatchSizes_.isEmpty()) {
if (allowedBatchSizes_.isEmpty()) {
// Share other's (immutable) list and mark ours as not privately owned.
allowedBatchSizes_ = other.allowedBatchSizes_;
bitField0_ = (bitField0_ & ~0x00000020);
} else {
ensureAllowedBatchSizesIsMutable();
allowedBatchSizes_.addAll(other.allowedBatchSizes_);
}
onChanged();
}
// proto3 scalar: only overwrite when other has a non-default value.
if (other.getPadVariableLengthInputs() != false) {
setPadVariableLengthInputs(other.getPadVariableLengthInputs());
}
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
// proto3: no required fields, so a builder is always initialized.
return true;
}
@java.lang.Override
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
// Parse into a temporary message, then merge it in; on parse failure the
// partially-parsed message (if any) is still merged so progress is kept.
tensorflow.serving.SessionBundleConfigOuterClass.BatchingParameters parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (tensorflow.serving.SessionBundleConfigOuterClass.BatchingParameters) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
// Builder-side state: bitField0_ tracks repeated-field mutability; each
// singular message field keeps either a plain value or a lazily-created
// SingleFieldBuilderV3 (never both "live" at once).
private int bitField0_;
private com.google.protobuf.Int64Value maxBatchSize_ = null;
private com.google.protobuf.SingleFieldBuilderV3<
com.google.protobuf.Int64Value, com.google.protobuf.Int64Value.Builder, com.google.protobuf.Int64ValueOrBuilder> maxBatchSizeBuilder_;
/**
*
* The maximum size of each batch.
* IMPORTANT: As discussed above, use 'max_batch_size * 2' client threads to
* achieve high throughput with batching.
*
*
* .google.protobuf.Int64Value max_batch_size = 1;
*/
public boolean hasMaxBatchSize() {
// Present if either a nested builder or a plain value exists.
return maxBatchSizeBuilder_ != null || maxBatchSize_ != null;
}
/**
*
* The maximum size of each batch.
* IMPORTANT: As discussed above, use 'max_batch_size * 2' client threads to
* achieve high throughput with batching.
*
*
* .google.protobuf.Int64Value max_batch_size = 1;
*/
public com.google.protobuf.Int64Value getMaxBatchSize() {
if (maxBatchSizeBuilder_ == null) {
return maxBatchSize_ == null ? com.google.protobuf.Int64Value.getDefaultInstance() : maxBatchSize_;
} else {
return maxBatchSizeBuilder_.getMessage();
}
}
/**
*
* The maximum size of each batch.
* IMPORTANT: As discussed above, use 'max_batch_size * 2' client threads to
* achieve high throughput with batching.
*
*
* .google.protobuf.Int64Value max_batch_size = 1;
*/
public Builder setMaxBatchSize(com.google.protobuf.Int64Value value) {
if (maxBatchSizeBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
maxBatchSize_ = value;
onChanged();
} else {
maxBatchSizeBuilder_.setMessage(value);
}
return this;
}
/**
*
* The maximum size of each batch.
* IMPORTANT: As discussed above, use 'max_batch_size * 2' client threads to
* achieve high throughput with batching.
*
*
* .google.protobuf.Int64Value max_batch_size = 1;
*/
public Builder setMaxBatchSize(
com.google.protobuf.Int64Value.Builder builderForValue) {
if (maxBatchSizeBuilder_ == null) {
maxBatchSize_ = builderForValue.build();
onChanged();
} else {
maxBatchSizeBuilder_.setMessage(builderForValue.build());
}
return this;
}
/**
*
* The maximum size of each batch.
* IMPORTANT: As discussed above, use 'max_batch_size * 2' client threads to
* achieve high throughput with batching.
*
*
* .google.protobuf.Int64Value max_batch_size = 1;
*/
public Builder mergeMaxBatchSize(com.google.protobuf.Int64Value value) {
if (maxBatchSizeBuilder_ == null) {
if (maxBatchSize_ != null) {
// Field already set: merge the incoming value into the existing one.
maxBatchSize_ =
com.google.protobuf.Int64Value.newBuilder(maxBatchSize_).mergeFrom(value).buildPartial();
} else {
maxBatchSize_ = value;
}
onChanged();
} else {
maxBatchSizeBuilder_.mergeFrom(value);
}
return this;
}
/**
*
* The maximum size of each batch.
* IMPORTANT: As discussed above, use 'max_batch_size * 2' client threads to
* achieve high throughput with batching.
*
*
* .google.protobuf.Int64Value max_batch_size = 1;
*/
public Builder clearMaxBatchSize() {
if (maxBatchSizeBuilder_ == null) {
maxBatchSize_ = null;
onChanged();
} else {
maxBatchSize_ = null;
maxBatchSizeBuilder_ = null;
}
return this;
}
/**
*
* The maximum size of each batch.
* IMPORTANT: As discussed above, use 'max_batch_size * 2' client threads to
* achieve high throughput with batching.
*
*
* .google.protobuf.Int64Value max_batch_size = 1;
*/
public com.google.protobuf.Int64Value.Builder getMaxBatchSizeBuilder() {
// Switches this field into nested-builder mode for in-place editing.
onChanged();
return getMaxBatchSizeFieldBuilder().getBuilder();
}
/**
*
* The maximum size of each batch.
* IMPORTANT: As discussed above, use 'max_batch_size * 2' client threads to
* achieve high throughput with batching.
*
*
* .google.protobuf.Int64Value max_batch_size = 1;
*/
public com.google.protobuf.Int64ValueOrBuilder getMaxBatchSizeOrBuilder() {
if (maxBatchSizeBuilder_ != null) {
return maxBatchSizeBuilder_.getMessageOrBuilder();
} else {
return maxBatchSize_ == null ?
com.google.protobuf.Int64Value.getDefaultInstance() : maxBatchSize_;
}
}
/**
*
* The maximum size of each batch.
* IMPORTANT: As discussed above, use 'max_batch_size * 2' client threads to
* achieve high throughput with batching.
*
*
* .google.protobuf.Int64Value max_batch_size = 1;
*/
private com.google.protobuf.SingleFieldBuilderV3<
com.google.protobuf.Int64Value, com.google.protobuf.Int64Value.Builder, com.google.protobuf.Int64ValueOrBuilder>
getMaxBatchSizeFieldBuilder() {
if (maxBatchSizeBuilder_ == null) {
// Lazily create the nested builder, seeded from the plain value, then
// null the plain value: exactly one representation is live at a time.
maxBatchSizeBuilder_ = new com.google.protobuf.SingleFieldBuilderV3<
com.google.protobuf.Int64Value, com.google.protobuf.Int64Value.Builder, com.google.protobuf.Int64ValueOrBuilder>(
getMaxBatchSize(),
getParentForChildren(),
isClean());
maxBatchSize_ = null;
}
return maxBatchSizeBuilder_;
}
// Same plain-value / nested-builder pair pattern as maxBatchSize_ above.
private com.google.protobuf.Int64Value batchTimeoutMicros_ = null;
private com.google.protobuf.SingleFieldBuilderV3<
com.google.protobuf.Int64Value, com.google.protobuf.Int64Value.Builder, com.google.protobuf.Int64ValueOrBuilder> batchTimeoutMicrosBuilder_;
/**
*
* If a task has been enqueued for this amount of time (in microseconds), and
* a thread is available, the scheduler will immediately form a batch from
* enqueued tasks and assign the batch to the thread for processing, even if
* the batch's size is below 'max_batch_size'.
*
*
* .google.protobuf.Int64Value batch_timeout_micros = 2;
*/
public boolean hasBatchTimeoutMicros() {
return batchTimeoutMicrosBuilder_ != null || batchTimeoutMicros_ != null;
}
/**
*
* If a task has been enqueued for this amount of time (in microseconds), and
* a thread is available, the scheduler will immediately form a batch from
* enqueued tasks and assign the batch to the thread for processing, even if
* the batch's size is below 'max_batch_size'.
*
*
* .google.protobuf.Int64Value batch_timeout_micros = 2;
*/
public com.google.protobuf.Int64Value getBatchTimeoutMicros() {
if (batchTimeoutMicrosBuilder_ == null) {
return batchTimeoutMicros_ == null ? com.google.protobuf.Int64Value.getDefaultInstance() : batchTimeoutMicros_;
} else {
return batchTimeoutMicrosBuilder_.getMessage();
}
}
/**
*
* If a task has been enqueued for this amount of time (in microseconds), and
* a thread is available, the scheduler will immediately form a batch from
* enqueued tasks and assign the batch to the thread for processing, even if
* the batch's size is below 'max_batch_size'.
*
*
* .google.protobuf.Int64Value batch_timeout_micros = 2;
*/
public Builder setBatchTimeoutMicros(com.google.protobuf.Int64Value value) {
if (batchTimeoutMicrosBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
batchTimeoutMicros_ = value;
onChanged();
} else {
batchTimeoutMicrosBuilder_.setMessage(value);
}
return this;
}
/**
*
* If a task has been enqueued for this amount of time (in microseconds), and
* a thread is available, the scheduler will immediately form a batch from
* enqueued tasks and assign the batch to the thread for processing, even if
* the batch's size is below 'max_batch_size'.
*
*
* .google.protobuf.Int64Value batch_timeout_micros = 2;
*/
public Builder setBatchTimeoutMicros(
com.google.protobuf.Int64Value.Builder builderForValue) {
if (batchTimeoutMicrosBuilder_ == null) {
batchTimeoutMicros_ = builderForValue.build();
onChanged();
} else {
batchTimeoutMicrosBuilder_.setMessage(builderForValue.build());
}
return this;
}
/**
*
* If a task has been enqueued for this amount of time (in microseconds), and
* a thread is available, the scheduler will immediately form a batch from
* enqueued tasks and assign the batch to the thread for processing, even if
* the batch's size is below 'max_batch_size'.
*
*
* .google.protobuf.Int64Value batch_timeout_micros = 2;
*/
public Builder mergeBatchTimeoutMicros(com.google.protobuf.Int64Value value) {
if (batchTimeoutMicrosBuilder_ == null) {
if (batchTimeoutMicros_ != null) {
// Field already set: merge the incoming value into the existing one.
batchTimeoutMicros_ =
com.google.protobuf.Int64Value.newBuilder(batchTimeoutMicros_).mergeFrom(value).buildPartial();
} else {
batchTimeoutMicros_ = value;
}
onChanged();
} else {
batchTimeoutMicrosBuilder_.mergeFrom(value);
}
return this;
}
/**
*
* If a task has been enqueued for this amount of time (in microseconds), and
* a thread is available, the scheduler will immediately form a batch from
* enqueued tasks and assign the batch to the thread for processing, even if
* the batch's size is below 'max_batch_size'.
*
*
* .google.protobuf.Int64Value batch_timeout_micros = 2;
*/
public Builder clearBatchTimeoutMicros() {
if (batchTimeoutMicrosBuilder_ == null) {
batchTimeoutMicros_ = null;
onChanged();
} else {
batchTimeoutMicros_ = null;
batchTimeoutMicrosBuilder_ = null;
}
return this;
}
/**
*
* If a task has been enqueued for this amount of time (in microseconds), and
* a thread is available, the scheduler will immediately form a batch from
* enqueued tasks and assign the batch to the thread for processing, even if
* the batch's size is below 'max_batch_size'.
*
*
* .google.protobuf.Int64Value batch_timeout_micros = 2;
*/
public com.google.protobuf.Int64Value.Builder getBatchTimeoutMicrosBuilder() {
onChanged();
return getBatchTimeoutMicrosFieldBuilder().getBuilder();
}
/**
*
* If a task has been enqueued for this amount of time (in microseconds), and
* a thread is available, the scheduler will immediately form a batch from
* enqueued tasks and assign the batch to the thread for processing, even if
* the batch's size is below 'max_batch_size'.
*
*
* .google.protobuf.Int64Value batch_timeout_micros = 2;
*/
public com.google.protobuf.Int64ValueOrBuilder getBatchTimeoutMicrosOrBuilder() {
if (batchTimeoutMicrosBuilder_ != null) {
return batchTimeoutMicrosBuilder_.getMessageOrBuilder();
} else {
return batchTimeoutMicros_ == null ?
com.google.protobuf.Int64Value.getDefaultInstance() : batchTimeoutMicros_;
}
}
/**
*
* If a task has been enqueued for this amount of time (in microseconds), and
* a thread is available, the scheduler will immediately form a batch from
* enqueued tasks and assign the batch to the thread for processing, even if
* the batch's size is below 'max_batch_size'.
*
*
* .google.protobuf.Int64Value batch_timeout_micros = 2;
*/
private com.google.protobuf.SingleFieldBuilderV3<
com.google.protobuf.Int64Value, com.google.protobuf.Int64Value.Builder, com.google.protobuf.Int64ValueOrBuilder>
getBatchTimeoutMicrosFieldBuilder() {
if (batchTimeoutMicrosBuilder_ == null) {
batchTimeoutMicrosBuilder_ = new com.google.protobuf.SingleFieldBuilderV3<
com.google.protobuf.Int64Value, com.google.protobuf.Int64Value.Builder, com.google.protobuf.Int64ValueOrBuilder>(
getBatchTimeoutMicros(),
getParentForChildren(),
isClean());
batchTimeoutMicros_ = null;
}
return batchTimeoutMicrosBuilder_;
}
// Field #3 (max_enqueued_batches): lazily materialized wrapper-message field.
// Same message/builder exclusivity invariant as the other singular message fields.
private com.google.protobuf.Int64Value maxEnqueuedBatches_ = null;
private com.google.protobuf.SingleFieldBuilderV3<
    com.google.protobuf.Int64Value, com.google.protobuf.Int64Value.Builder, com.google.protobuf.Int64ValueOrBuilder> maxEnqueuedBatchesBuilder_;
/**
 * Returns whether max_enqueued_batches has been set.
 *
 * <pre>
 * The maximum length of the queue, in terms of the number of batches. (A
 * batch that has been scheduled on a thread is considered to have been
 * removed from the queue.)
 * </pre>
 *
 * <code>.google.protobuf.Int64Value max_enqueued_batches = 3;</code>
 */
public boolean hasMaxEnqueuedBatches() {
  return maxEnqueuedBatchesBuilder_ != null || maxEnqueuedBatches_ != null;
}
/**
 * Returns max_enqueued_batches, or the {@code Int64Value} default instance if unset.
 *
 * <code>.google.protobuf.Int64Value max_enqueued_batches = 3;</code>
 */
public com.google.protobuf.Int64Value getMaxEnqueuedBatches() {
  if (maxEnqueuedBatchesBuilder_ == null) {
    return maxEnqueuedBatches_ == null ? com.google.protobuf.Int64Value.getDefaultInstance() : maxEnqueuedBatches_;
  } else {
    return maxEnqueuedBatchesBuilder_.getMessage();
  }
}
/**
 * Sets max_enqueued_batches to {@code value}.
 *
 * <code>.google.protobuf.Int64Value max_enqueued_batches = 3;</code>
 * @throws NullPointerException if {@code value} is null
 */
public Builder setMaxEnqueuedBatches(com.google.protobuf.Int64Value value) {
  if (maxEnqueuedBatchesBuilder_ == null) {
    if (value == null) {
      throw new NullPointerException();
    }
    maxEnqueuedBatches_ = value;
    onChanged();
  } else {
    maxEnqueuedBatchesBuilder_.setMessage(value);
  }
  return this;
}
/**
 * Sets max_enqueued_batches from a sub-builder's built value.
 *
 * <code>.google.protobuf.Int64Value max_enqueued_batches = 3;</code>
 */
public Builder setMaxEnqueuedBatches(
    com.google.protobuf.Int64Value.Builder builderForValue) {
  if (maxEnqueuedBatchesBuilder_ == null) {
    maxEnqueuedBatches_ = builderForValue.build();
    onChanged();
  } else {
    maxEnqueuedBatchesBuilder_.setMessage(builderForValue.build());
  }
  return this;
}
/**
 * Merges {@code value} into max_enqueued_batches (protobuf message-merge
 * semantics via buildPartial); adopts {@code value} directly if currently unset.
 *
 * <code>.google.protobuf.Int64Value max_enqueued_batches = 3;</code>
 */
public Builder mergeMaxEnqueuedBatches(com.google.protobuf.Int64Value value) {
  if (maxEnqueuedBatchesBuilder_ == null) {
    if (maxEnqueuedBatches_ != null) {
      maxEnqueuedBatches_ =
        com.google.protobuf.Int64Value.newBuilder(maxEnqueuedBatches_).mergeFrom(value).buildPartial();
    } else {
      maxEnqueuedBatches_ = value;
    }
    onChanged();
  } else {
    maxEnqueuedBatchesBuilder_.mergeFrom(value);
  }
  return this;
}
/**
 * Clears max_enqueued_batches back to unset.
 *
 * <code>.google.protobuf.Int64Value max_enqueued_batches = 3;</code>
 */
public Builder clearMaxEnqueuedBatches() {
  if (maxEnqueuedBatchesBuilder_ == null) {
    maxEnqueuedBatches_ = null;
    onChanged();
  } else {
    // Builder path: dropping the builder (not just its message) resets to unset.
    maxEnqueuedBatches_ = null;
    maxEnqueuedBatchesBuilder_ = null;
  }
  return this;
}
/**
 * Returns a mutable sub-builder for max_enqueued_batches, creating the nested
 * field builder on first use.
 *
 * <code>.google.protobuf.Int64Value max_enqueued_batches = 3;</code>
 */
public com.google.protobuf.Int64Value.Builder getMaxEnqueuedBatchesBuilder() {
  onChanged();
  return getMaxEnqueuedBatchesFieldBuilder().getBuilder();
}
/**
 * Read-only view of max_enqueued_batches that does not force builder creation.
 *
 * <code>.google.protobuf.Int64Value max_enqueued_batches = 3;</code>
 */
public com.google.protobuf.Int64ValueOrBuilder getMaxEnqueuedBatchesOrBuilder() {
  if (maxEnqueuedBatchesBuilder_ != null) {
    return maxEnqueuedBatchesBuilder_.getMessageOrBuilder();
  } else {
    return maxEnqueuedBatches_ == null ?
        com.google.protobuf.Int64Value.getDefaultInstance() : maxEnqueuedBatches_;
  }
}
/**
 * Lazily creates the SingleFieldBuilderV3 for max_enqueued_batches; ownership
 * of the current value transfers to the builder and the plain field is nulled.
 *
 * <code>.google.protobuf.Int64Value max_enqueued_batches = 3;</code>
 */
private com.google.protobuf.SingleFieldBuilderV3<
    com.google.protobuf.Int64Value, com.google.protobuf.Int64Value.Builder, com.google.protobuf.Int64ValueOrBuilder>
    getMaxEnqueuedBatchesFieldBuilder() {
  if (maxEnqueuedBatchesBuilder_ == null) {
    maxEnqueuedBatchesBuilder_ = new com.google.protobuf.SingleFieldBuilderV3<
        com.google.protobuf.Int64Value, com.google.protobuf.Int64Value.Builder, com.google.protobuf.Int64ValueOrBuilder>(
            getMaxEnqueuedBatches(),
            getParentForChildren(),
            isClean());
    maxEnqueuedBatches_ = null;
  }
  return maxEnqueuedBatchesBuilder_;
}
// Field #4 (num_batch_threads): lazily materialized wrapper-message field.
// Same message/builder exclusivity invariant as the other singular message fields.
private com.google.protobuf.Int64Value numBatchThreads_ = null;
private com.google.protobuf.SingleFieldBuilderV3<
    com.google.protobuf.Int64Value, com.google.protobuf.Int64Value.Builder, com.google.protobuf.Int64ValueOrBuilder> numBatchThreadsBuilder_;
/**
 * Returns whether num_batch_threads has been set.
 *
 * <pre>
 * The number of threads to use to process batches.
 * Must be &gt;= 1, and should be tuned carefully.
 * </pre>
 *
 * <code>.google.protobuf.Int64Value num_batch_threads = 4;</code>
 */
public boolean hasNumBatchThreads() {
  return numBatchThreadsBuilder_ != null || numBatchThreads_ != null;
}
/**
 * Returns num_batch_threads, or the {@code Int64Value} default instance if unset.
 *
 * <code>.google.protobuf.Int64Value num_batch_threads = 4;</code>
 */
public com.google.protobuf.Int64Value getNumBatchThreads() {
  if (numBatchThreadsBuilder_ == null) {
    return numBatchThreads_ == null ? com.google.protobuf.Int64Value.getDefaultInstance() : numBatchThreads_;
  } else {
    return numBatchThreadsBuilder_.getMessage();
  }
}
/**
 * Sets num_batch_threads to {@code value}.
 *
 * <code>.google.protobuf.Int64Value num_batch_threads = 4;</code>
 * @throws NullPointerException if {@code value} is null
 */
public Builder setNumBatchThreads(com.google.protobuf.Int64Value value) {
  if (numBatchThreadsBuilder_ == null) {
    if (value == null) {
      throw new NullPointerException();
    }
    numBatchThreads_ = value;
    onChanged();
  } else {
    numBatchThreadsBuilder_.setMessage(value);
  }
  return this;
}
/**
 * Sets num_batch_threads from a sub-builder's built value.
 *
 * <code>.google.protobuf.Int64Value num_batch_threads = 4;</code>
 */
public Builder setNumBatchThreads(
    com.google.protobuf.Int64Value.Builder builderForValue) {
  if (numBatchThreadsBuilder_ == null) {
    numBatchThreads_ = builderForValue.build();
    onChanged();
  } else {
    numBatchThreadsBuilder_.setMessage(builderForValue.build());
  }
  return this;
}
/**
 * Merges {@code value} into num_batch_threads (protobuf message-merge
 * semantics via buildPartial); adopts {@code value} directly if currently unset.
 *
 * <code>.google.protobuf.Int64Value num_batch_threads = 4;</code>
 */
public Builder mergeNumBatchThreads(com.google.protobuf.Int64Value value) {
  if (numBatchThreadsBuilder_ == null) {
    if (numBatchThreads_ != null) {
      numBatchThreads_ =
        com.google.protobuf.Int64Value.newBuilder(numBatchThreads_).mergeFrom(value).buildPartial();
    } else {
      numBatchThreads_ = value;
    }
    onChanged();
  } else {
    numBatchThreadsBuilder_.mergeFrom(value);
  }
  return this;
}
/**
 * Clears num_batch_threads back to unset.
 *
 * <code>.google.protobuf.Int64Value num_batch_threads = 4;</code>
 */
public Builder clearNumBatchThreads() {
  if (numBatchThreadsBuilder_ == null) {
    numBatchThreads_ = null;
    onChanged();
  } else {
    // Builder path: dropping the builder (not just its message) resets to unset.
    numBatchThreads_ = null;
    numBatchThreadsBuilder_ = null;
  }
  return this;
}
/**
 * Returns a mutable sub-builder for num_batch_threads, creating the nested
 * field builder on first use.
 *
 * <code>.google.protobuf.Int64Value num_batch_threads = 4;</code>
 */
public com.google.protobuf.Int64Value.Builder getNumBatchThreadsBuilder() {
  onChanged();
  return getNumBatchThreadsFieldBuilder().getBuilder();
}
/**
 * Read-only view of num_batch_threads that does not force builder creation.
 *
 * <code>.google.protobuf.Int64Value num_batch_threads = 4;</code>
 */
public com.google.protobuf.Int64ValueOrBuilder getNumBatchThreadsOrBuilder() {
  if (numBatchThreadsBuilder_ != null) {
    return numBatchThreadsBuilder_.getMessageOrBuilder();
  } else {
    return numBatchThreads_ == null ?
        com.google.protobuf.Int64Value.getDefaultInstance() : numBatchThreads_;
  }
}
/**
 * Lazily creates the SingleFieldBuilderV3 for num_batch_threads; ownership
 * of the current value transfers to the builder and the plain field is nulled.
 *
 * <code>.google.protobuf.Int64Value num_batch_threads = 4;</code>
 */
private com.google.protobuf.SingleFieldBuilderV3<
    com.google.protobuf.Int64Value, com.google.protobuf.Int64Value.Builder, com.google.protobuf.Int64ValueOrBuilder>
    getNumBatchThreadsFieldBuilder() {
  if (numBatchThreadsBuilder_ == null) {
    numBatchThreadsBuilder_ = new com.google.protobuf.SingleFieldBuilderV3<
        com.google.protobuf.Int64Value, com.google.protobuf.Int64Value.Builder, com.google.protobuf.Int64ValueOrBuilder>(
            getNumBatchThreads(),
            getParentForChildren(),
            isClean());
    numBatchThreads_ = null;
  }
  return numBatchThreadsBuilder_;
}
// Field #5 (thread_pool_name): lazily materialized StringValue wrapper field.
// Same message/builder exclusivity invariant as the Int64Value fields above.
private com.google.protobuf.StringValue threadPoolName_ = null;
private com.google.protobuf.SingleFieldBuilderV3<
    com.google.protobuf.StringValue, com.google.protobuf.StringValue.Builder, com.google.protobuf.StringValueOrBuilder> threadPoolNameBuilder_;
/**
 * Returns whether thread_pool_name has been set.
 *
 * <pre>
 * The name to use for the pool of batch threads.
 * </pre>
 *
 * <code>.google.protobuf.StringValue thread_pool_name = 5;</code>
 */
public boolean hasThreadPoolName() {
  return threadPoolNameBuilder_ != null || threadPoolName_ != null;
}
/**
 * Returns thread_pool_name, or the {@code StringValue} default instance if unset.
 *
 * <code>.google.protobuf.StringValue thread_pool_name = 5;</code>
 */
public com.google.protobuf.StringValue getThreadPoolName() {
  if (threadPoolNameBuilder_ == null) {
    return threadPoolName_ == null ? com.google.protobuf.StringValue.getDefaultInstance() : threadPoolName_;
  } else {
    return threadPoolNameBuilder_.getMessage();
  }
}
/**
 * Sets thread_pool_name to {@code value}.
 *
 * <code>.google.protobuf.StringValue thread_pool_name = 5;</code>
 * @throws NullPointerException if {@code value} is null
 */
public Builder setThreadPoolName(com.google.protobuf.StringValue value) {
  if (threadPoolNameBuilder_ == null) {
    if (value == null) {
      throw new NullPointerException();
    }
    threadPoolName_ = value;
    onChanged();
  } else {
    threadPoolNameBuilder_.setMessage(value);
  }
  return this;
}
/**
 * Sets thread_pool_name from a sub-builder's built value.
 *
 * <code>.google.protobuf.StringValue thread_pool_name = 5;</code>
 */
public Builder setThreadPoolName(
    com.google.protobuf.StringValue.Builder builderForValue) {
  if (threadPoolNameBuilder_ == null) {
    threadPoolName_ = builderForValue.build();
    onChanged();
  } else {
    threadPoolNameBuilder_.setMessage(builderForValue.build());
  }
  return this;
}
/**
 * Merges {@code value} into thread_pool_name (protobuf message-merge
 * semantics via buildPartial); adopts {@code value} directly if currently unset.
 *
 * <code>.google.protobuf.StringValue thread_pool_name = 5;</code>
 */
public Builder mergeThreadPoolName(com.google.protobuf.StringValue value) {
  if (threadPoolNameBuilder_ == null) {
    if (threadPoolName_ != null) {
      threadPoolName_ =
        com.google.protobuf.StringValue.newBuilder(threadPoolName_).mergeFrom(value).buildPartial();
    } else {
      threadPoolName_ = value;
    }
    onChanged();
  } else {
    threadPoolNameBuilder_.mergeFrom(value);
  }
  return this;
}
/**
 * Clears thread_pool_name back to unset.
 *
 * <code>.google.protobuf.StringValue thread_pool_name = 5;</code>
 */
public Builder clearThreadPoolName() {
  if (threadPoolNameBuilder_ == null) {
    threadPoolName_ = null;
    onChanged();
  } else {
    // Builder path: dropping the builder (not just its message) resets to unset.
    threadPoolName_ = null;
    threadPoolNameBuilder_ = null;
  }
  return this;
}
/**
 * Returns a mutable sub-builder for thread_pool_name, creating the nested
 * field builder on first use.
 *
 * <code>.google.protobuf.StringValue thread_pool_name = 5;</code>
 */
public com.google.protobuf.StringValue.Builder getThreadPoolNameBuilder() {
  onChanged();
  return getThreadPoolNameFieldBuilder().getBuilder();
}
/**
 * Read-only view of thread_pool_name that does not force builder creation.
 *
 * <code>.google.protobuf.StringValue thread_pool_name = 5;</code>
 */
public com.google.protobuf.StringValueOrBuilder getThreadPoolNameOrBuilder() {
  if (threadPoolNameBuilder_ != null) {
    return threadPoolNameBuilder_.getMessageOrBuilder();
  } else {
    return threadPoolName_ == null ?
        com.google.protobuf.StringValue.getDefaultInstance() : threadPoolName_;
  }
}
/**
 * Lazily creates the SingleFieldBuilderV3 for thread_pool_name; ownership
 * of the current value transfers to the builder and the plain field is nulled.
 *
 * <code>.google.protobuf.StringValue thread_pool_name = 5;</code>
 */
private com.google.protobuf.SingleFieldBuilderV3<
    com.google.protobuf.StringValue, com.google.protobuf.StringValue.Builder, com.google.protobuf.StringValueOrBuilder>
    getThreadPoolNameFieldBuilder() {
  if (threadPoolNameBuilder_ == null) {
    threadPoolNameBuilder_ = new com.google.protobuf.SingleFieldBuilderV3<
        com.google.protobuf.StringValue, com.google.protobuf.StringValue.Builder, com.google.protobuf.StringValueOrBuilder>(
            getThreadPoolName(),
            getParentForChildren(),
            isClean());
    threadPoolName_ = null;
  }
  return threadPoolNameBuilder_;
}
private java.util.List allowedBatchSizes_ = java.util.Collections.emptyList();
private void ensureAllowedBatchSizesIsMutable() {
if (!((bitField0_ & 0x00000020) == 0x00000020)) {
allowedBatchSizes_ = new java.util.ArrayList(allowedBatchSizes_);
bitField0_ |= 0x00000020;
}
}
/**
*
* The allowed batch sizes. (Ignored if left empty.)
* Requirements:
* - The entries must be in increasing order.
* - The final entry must equal 'max_batch_size'.
*
*
* repeated int64 allowed_batch_sizes = 6;
*/
public java.util.List
getAllowedBatchSizesList() {
return java.util.Collections.unmodifiableList(allowedBatchSizes_);
}
/**
*
* The allowed batch sizes. (Ignored if left empty.)
* Requirements:
* - The entries must be in increasing order.
* - The final entry must equal 'max_batch_size'.
*
*
* repeated int64 allowed_batch_sizes = 6;
*/
public int getAllowedBatchSizesCount() {
return allowedBatchSizes_.size();
}
/**
*
* The allowed batch sizes. (Ignored if left empty.)
* Requirements:
* - The entries must be in increasing order.
* - The final entry must equal 'max_batch_size'.
*
*
* repeated int64 allowed_batch_sizes = 6;
*/
public long getAllowedBatchSizes(int index) {
return allowedBatchSizes_.get(index);
}
/**
*
* The allowed batch sizes. (Ignored if left empty.)
* Requirements:
* - The entries must be in increasing order.
* - The final entry must equal 'max_batch_size'.
*
*
* repeated int64 allowed_batch_sizes = 6;
*/
public Builder setAllowedBatchSizes(
int index, long value) {
ensureAllowedBatchSizesIsMutable();
allowedBatchSizes_.set(index, value);
onChanged();
return this;
}
/**
*
* The allowed batch sizes. (Ignored if left empty.)
* Requirements:
* - The entries must be in increasing order.
* - The final entry must equal 'max_batch_size'.
*
*
* repeated int64 allowed_batch_sizes = 6;
*/
public Builder addAllowedBatchSizes(long value) {
ensureAllowedBatchSizesIsMutable();
allowedBatchSizes_.add(value);
onChanged();
return this;
}
/**
*
* The allowed batch sizes. (Ignored if left empty.)
* Requirements:
* - The entries must be in increasing order.
* - The final entry must equal 'max_batch_size'.
*
*
* repeated int64 allowed_batch_sizes = 6;
*/
public Builder addAllAllowedBatchSizes(
java.lang.Iterable extends java.lang.Long> values) {
ensureAllowedBatchSizesIsMutable();
com.google.protobuf.AbstractMessageLite.Builder.addAll(
values, allowedBatchSizes_);
onChanged();
return this;
}
/**
*
* The allowed batch sizes. (Ignored if left empty.)
* Requirements:
* - The entries must be in increasing order.
* - The final entry must equal 'max_batch_size'.
*
*
* repeated int64 allowed_batch_sizes = 6;
*/
public Builder clearAllowedBatchSizes() {
allowedBatchSizes_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000020);
onChanged();
return this;
}
// Field #7 (pad_variable_length_inputs): plain proto3 bool, default false.
private boolean padVariableLengthInputs_ ;
/**
 * Returns pad_variable_length_inputs.
 *
 * <pre>
 * Whether to pad variable-length inputs when a batch is formed.
 * </pre>
 *
 * <code>bool pad_variable_length_inputs = 7;</code>
 */
public boolean getPadVariableLengthInputs() {
  return padVariableLengthInputs_;
}
/**
 * Sets pad_variable_length_inputs to {@code value}.
 *
 * <code>bool pad_variable_length_inputs = 7;</code>
 */
public Builder setPadVariableLengthInputs(boolean value) {
  padVariableLengthInputs_ = value;
  onChanged();
  return this;
}
/**
 * Resets pad_variable_length_inputs to its proto3 default (false).
 *
 * <code>bool pad_variable_length_inputs = 7;</code>
 */
public Builder clearPadVariableLengthInputs() {
  padVariableLengthInputs_ = false;
  onChanged();
  return this;
}
// Unknown-field plumbing for a proto3 builder: set routes through the proto3
// variant in the superclass; merge uses the shared implementation.
@java.lang.Override
public final Builder setUnknownFields(
    final com.google.protobuf.UnknownFieldSet unknownFields) {
  return super.setUnknownFieldsProto3(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
    final com.google.protobuf.UnknownFieldSet unknownFields) {
  return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:tensorflow.serving.BatchingParameters)
}
// @@protoc_insertion_point(class_scope:tensorflow.serving.BatchingParameters)
// Singleton default instance of BatchingParameters (all fields unset),
// created eagerly at class-initialization time.
private static final tensorflow.serving.SessionBundleConfigOuterClass.BatchingParameters DEFAULT_INSTANCE;
static {
  DEFAULT_INSTANCE = new tensorflow.serving.SessionBundleConfigOuterClass.BatchingParameters();
}
/** Returns the shared immutable default instance of BatchingParameters. */
public static tensorflow.serving.SessionBundleConfigOuterClass.BatchingParameters getDefaultInstance() {
  return DEFAULT_INSTANCE;
}
// NOTE(review): restored the <BatchingParameters> type arguments on Parser and
// AbstractParser that had been stripped (apparently by HTML extraction); the raw
// anonymous AbstractParser cannot override parsePartialFrom with a covariant
// BatchingParameters return type.
private static final com.google.protobuf.Parser<BatchingParameters>
    PARSER = new com.google.protobuf.AbstractParser<BatchingParameters>() {
  @java.lang.Override
  public BatchingParameters parsePartialFrom(
      com.google.protobuf.CodedInputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    // Delegates to the parsing constructor generated for BatchingParameters.
    return new BatchingParameters(input, extensionRegistry);
  }
};
/** Returns the singleton wire-format parser for BatchingParameters. */
public static com.google.protobuf.Parser<BatchingParameters> parser() {
  return PARSER;
}
/** Returns the singleton parser; required override from GeneratedMessageV3. */
@java.lang.Override
public com.google.protobuf.Parser<BatchingParameters> getParserForType() {
  // NOTE(review): restored the <BatchingParameters> type argument that had been
  // stripped, leaving a raw Parser return type (unchecked, non-idiomatic for
  // protoc output).
  return PARSER;
}
/** Returns the default BatchingParameters instance; required protobuf override. */
@java.lang.Override
public tensorflow.serving.SessionBundleConfigOuterClass.BatchingParameters getDefaultInstanceForType() {
  return DEFAULT_INSTANCE;
}
}
// Descriptor handles and reflection accessor tables for the two message types
// declared in this .proto file; all four are populated by the static
// initializer at the bottom of the class.
private static final com.google.protobuf.Descriptors.Descriptor
  internal_static_tensorflow_serving_SessionBundleConfig_descriptor;
private static final
  com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
    internal_static_tensorflow_serving_SessionBundleConfig_fieldAccessorTable;
private static final com.google.protobuf.Descriptors.Descriptor
  internal_static_tensorflow_serving_BatchingParameters_descriptor;
private static final
  com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
    internal_static_tensorflow_serving_BatchingParameters_fieldAccessorTable;
/** Returns the FileDescriptor for session_bundle_config.proto. */
public static com.google.protobuf.Descriptors.FileDescriptor
    getDescriptor() {
  return descriptor;
}
// Assigned once by the InternalDescriptorAssigner callback in the static block.
private static com.google.protobuf.Descriptors.FileDescriptor
    descriptor;
static {
  // Serialized FileDescriptorProto for
  // tensorflow_serving/servables/tensorflow/session_bundle_config.proto, emitted
  // by protoc as an escaped-byte string. Must not be hand-edited; regenerate
  // from the .proto instead.
  java.lang.String[] descriptorData = {
    "\nCtensorflow_serving/servables/tensorflo" +
    "w/session_bundle_config.proto\022\022tensorflo" +
    "w.serving\032\036google/protobuf/wrappers.prot" +
    "o\032%tensorflow/core/protobuf/config.proto" +
    "\032+tensorflow/core/protobuf/named_tensor." +
    "proto\"\242\003\n\023SessionBundleConfig\022\026\n\016session" +
    "_target\030\001 \001(\t\022/\n\016session_config\030\002 \001(\0132\027." +
    "tensorflow.ConfigProto\022C\n\023batching_param" +
    "eters\030\003 \001(\0132&.tensorflow.serving.Batchin" +
    "gParameters\022F\n!session_run_load_threadpo" +
    "ol_index\030\004 \001(\0132\033.google.protobuf.Int32Va" +
    "lue\0224\n,experimental_transient_ram_bytes_" +
    "during_load\030\005 \001(\004\022\030\n\020saved_model_tags\030\006 " +
    "\003(\t\022G\n experimental_fixed_input_tensors\030" +
    "\212\006 \003(\0132\034.tensorflow.NamedTensorProto\022\034\n\023" +
    "enable_model_warmup\030\213\006 \001(\010\"\360\002\n\022BatchingP" +
    "arameters\0223\n\016max_batch_size\030\001 \001(\0132\033.goog" +
    "le.protobuf.Int64Value\0229\n\024batch_timeout_" +
    "micros\030\002 \001(\0132\033.google.protobuf.Int64Valu" +
    "e\0229\n\024max_enqueued_batches\030\003 \001(\0132\033.google" +
    ".protobuf.Int64Value\0226\n\021num_batch_thread" +
    "s\030\004 \001(\0132\033.google.protobuf.Int64Value\0226\n\020" +
    "thread_pool_name\030\005 \001(\0132\034.google.protobuf" +
    ".StringValue\022\033\n\023allowed_batch_sizes\030\006 \003(" +
    "\003\022\"\n\032pad_variable_length_inputs\030\007 \001(\010b\006p" +
    "roto3"
  };
  // Callback that captures the fully-linked FileDescriptor into `descriptor`.
  com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
      new com.google.protobuf.Descriptors.FileDescriptor. InternalDescriptorAssigner() {
        public com.google.protobuf.ExtensionRegistry assignDescriptors(
            com.google.protobuf.Descriptors.FileDescriptor root) {
          descriptor = root;
          return null; // this file defines no extensions to register
        }
      };
  // Build this file's descriptor, resolving references against its three deps.
  com.google.protobuf.Descriptors.FileDescriptor
    .internalBuildGeneratedFileFrom(descriptorData,
      new com.google.protobuf.Descriptors.FileDescriptor[] {
        com.google.protobuf.WrappersProto.getDescriptor(),
        org.tensorflow.framework.ConfigProtos.getDescriptor(),
        org.tensorflow.framework.NamedTensorProtos.getDescriptor(),
      }, assigner);
  // Message index 0: SessionBundleConfig reflection table.
  internal_static_tensorflow_serving_SessionBundleConfig_descriptor =
    getDescriptor().getMessageTypes().get(0);
  internal_static_tensorflow_serving_SessionBundleConfig_fieldAccessorTable = new
    com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
      internal_static_tensorflow_serving_SessionBundleConfig_descriptor,
      new java.lang.String[] { "SessionTarget", "SessionConfig", "BatchingParameters", "SessionRunLoadThreadpoolIndex", "ExperimentalTransientRamBytesDuringLoad", "SavedModelTags", "ExperimentalFixedInputTensors", "EnableModelWarmup", });
  // Message index 1: BatchingParameters reflection table.
  internal_static_tensorflow_serving_BatchingParameters_descriptor =
    getDescriptor().getMessageTypes().get(1);
  internal_static_tensorflow_serving_BatchingParameters_fieldAccessorTable = new
    com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
      internal_static_tensorflow_serving_BatchingParameters_descriptor,
      new java.lang.String[] { "MaxBatchSize", "BatchTimeoutMicros", "MaxEnqueuedBatches", "NumBatchThreads", "ThreadPoolName", "AllowedBatchSizes", "PadVariableLengthInputs", });
  // Force class-initialization of dependencies so their descriptors are linked.
  com.google.protobuf.WrappersProto.getDescriptor();
  org.tensorflow.framework.ConfigProtos.getDescriptor();
  org.tensorflow.framework.NamedTensorProtos.getDescriptor();
}
// @@protoc_insertion_point(outer_class_scope)
}