// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: tensorflow/core/protobuf/config.proto
package org.tensorflow.framework;
/**
* Protobuf type {@code tensorflow.GPUOptions}
*/
public final class GPUOptions extends
com.google.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:tensorflow.GPUOptions)
GPUOptionsOrBuilder {
// Use GPUOptions.newBuilder() to construct.
private GPUOptions(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private GPUOptions() {
perProcessGpuMemoryFraction_ = 0D;
allocatorType_ = "";
deferredDeletionBytes_ = 0L;
allowGrowth_ = false;
visibleDeviceList_ = "";
pollingActiveDelayUsecs_ = 0;
pollingInactiveDelayMsecs_ = 0;
forceGpuCompatible_ = false;
}
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return com.google.protobuf.UnknownFieldSet.getDefaultInstance();
}
private GPUOptions(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
this();
int mutable_bitField0_ = 0;
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
// A tag of 0 marks the end of the input.
done = true;
break;
case 9: {
perProcessGpuMemoryFraction_ = input.readDouble();
break;
}
case 18: {
java.lang.String s = input.readStringRequireUtf8();
allocatorType_ = s;
break;
}
case 24: {
deferredDeletionBytes_ = input.readInt64();
break;
}
case 32: {
allowGrowth_ = input.readBool();
break;
}
case 42: {
java.lang.String s = input.readStringRequireUtf8();
visibleDeviceList_ = s;
break;
}
case 48: {
pollingActiveDelayUsecs_ = input.readInt32();
break;
}
case 56: {
pollingInactiveDelayMsecs_ = input.readInt32();
break;
}
case 64: {
forceGpuCompatible_ = input.readBool();
break;
}
default: {
// Skip unknown fields; skipField returns false on an end-group tag.
if (!input.skipField(tag)) {
done = true;
}
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_GPUOptions_descriptor;
}
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_GPUOptions_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.tensorflow.framework.GPUOptions.class, org.tensorflow.framework.GPUOptions.Builder.class);
}
public static final int PER_PROCESS_GPU_MEMORY_FRACTION_FIELD_NUMBER = 1;
private double perProcessGpuMemoryFraction_;
/**
*
* A value between 0 and 1 that indicates what fraction of the
* available GPU memory to pre-allocate for each process. 1 means
* to pre-allocate all of the GPU memory, 0.5 means the process
* allocates ~50% of the available GPU memory.
*
*
* double per_process_gpu_memory_fraction = 1;
*/
public double getPerProcessGpuMemoryFraction() {
return perProcessGpuMemoryFraction_;
}
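/*
 * Editor's example, not generated code: a minimal sketch of capping
 * per-process GPU memory through the Builder defined later in this class;
 * the 0.4 fraction is an illustrative assumption.
 *
 *   GPUOptions opts = GPUOptions.newBuilder()
 *       .setPerProcessGpuMemoryFraction(0.4)  // pre-allocate ~40% of GPU memory
 *       .build();
 */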
public static final int ALLOCATOR_TYPE_FIELD_NUMBER = 2;
private volatile java.lang.Object allocatorType_;
/**
*
* The type of GPU allocation strategy to use.
* Allowed values:
* "": The empty string (default) uses a system-chosen default
* which may change over time.
* "BFC": A "Best-fit with coalescing" algorithm, simplified from a
* version of dlmalloc.
*
*
* string allocator_type = 2;
*/
public java.lang.String getAllocatorType() {
java.lang.Object ref = allocatorType_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
allocatorType_ = s;
return s;
}
}
/**
*
* The type of GPU allocation strategy to use.
* Allowed values:
* "": The empty string (default) uses a system-chosen default
* which may change over time.
* "BFC": A "Best-fit with coalescing" algorithm, simplified from a
* version of dlmalloc.
*
*
* string allocator_type = 2;
*/
public com.google.protobuf.ByteString
getAllocatorTypeBytes() {
java.lang.Object ref = allocatorType_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
allocatorType_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
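/*
 * Editor's example, not generated code: selecting the "BFC" allocator named
 * in the field comment above; passing the empty string (the default) keeps
 * the system-chosen allocator.
 *
 *   GPUOptions opts = GPUOptions.newBuilder()
 *       .setAllocatorType("BFC")  // best-fit with coalescing
 *       .build();
 */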
public static final int DEFERRED_DELETION_BYTES_FIELD_NUMBER = 3;
private long deferredDeletionBytes_;
/**
*
* Delay deletion of up to this many bytes to reduce the number of
* interactions with GPU driver code. If 0, the system chooses
* a reasonable default (several MBs).
*
*
* int64 deferred_deletion_bytes = 3;
*/
public long getDeferredDeletionBytes() {
return deferredDeletionBytes_;
}
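/*
 * Editor's example, not generated code: raising the deferred-deletion
 * threshold to batch GPU driver interactions; the 8 MB figure is an
 * illustrative assumption, and 0 keeps the system default of several MBs.
 *
 *   GPUOptions opts = GPUOptions.newBuilder()
 *       .setDeferredDeletionBytes(8L * 1024 * 1024)  // defer up to 8 MB
 *       .build();
 */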
public static final int ALLOW_GROWTH_FIELD_NUMBER = 4;
private boolean allowGrowth_;
/**
*
* If true, the allocator does not pre-allocate the entire specified
* GPU memory region, instead starting small and growing as needed.
*
*
* bool allow_growth = 4;
*/
public boolean getAllowGrowth() {
return allowGrowth_;
}
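/*
 * Editor's example, not generated code: opting into on-demand growth so the
 * allocator starts small instead of pre-allocating the whole region.
 *
 *   GPUOptions opts = GPUOptions.newBuilder()
 *       .setAllowGrowth(true)
 *       .build();
 */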
public static final int VISIBLE_DEVICE_LIST_FIELD_NUMBER = 5;
private volatile java.lang.Object visibleDeviceList_;
/**
*
* A comma-separated list of GPU ids that determines the 'visible'
* to 'virtual' mapping of GPU devices. For example, if TensorFlow
* can see 8 GPU devices in the process, and one wanted to map
* visible GPU devices 5 and 3 as "/device:GPU:0" and "/device:GPU:1", then one
* would specify this field as "5,3". This field is similar in
* spirit to the CUDA_VISIBLE_DEVICES environment variable, except
* it applies to the visible GPU devices in the process.
* NOTE: The GPU driver provides the process with the visible GPUs
* in an order which is not guaranteed to have any correlation to
* the *physical* GPU id in the machine. This field is used for
* remapping "visible" to "virtual", which means this operates only
* after the process starts. Users are required to use vendor
* specific mechanisms (e.g., CUDA_VISIBLE_DEVICES) to control the
* physical to visible device mapping prior to invoking TensorFlow.
*
*
* string visible_device_list = 5;
*/
public java.lang.String getVisibleDeviceList() {
java.lang.Object ref = visibleDeviceList_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
visibleDeviceList_ = s;
return s;
}
}
/**
*
* A comma-separated list of GPU ids that determines the 'visible'
* to 'virtual' mapping of GPU devices. For example, if TensorFlow
* can see 8 GPU devices in the process, and one wanted to map
* visible GPU devices 5 and 3 as "/device:GPU:0" and "/device:GPU:1", then one
* would specify this field as "5,3". This field is similar in
* spirit to the CUDA_VISIBLE_DEVICES environment variable, except
* it applies to the visible GPU devices in the process.
* NOTE: The GPU driver provides the process with the visible GPUs
* in an order which is not guaranteed to have any correlation to
* the *physical* GPU id in the machine. This field is used for
* remapping "visible" to "virtual", which means this operates only
* after the process starts. Users are required to use vendor
* specific mechanisms (e.g., CUDA_VISIBLE_DEVICES) to control the
* physical to visible device mapping prior to invoking TensorFlow.
*
*
* string visible_device_list = 5;
*/
public com.google.protobuf.ByteString
getVisibleDeviceListBytes() {
java.lang.Object ref = visibleDeviceList_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
visibleDeviceList_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
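/*
 * Editor's example, not generated code: the "5,3" remapping described above,
 * which exposes visible GPU 5 as "/device:GPU:0" and visible GPU 3 as
 * "/device:GPU:1".
 *
 *   GPUOptions opts = GPUOptions.newBuilder()
 *       .setVisibleDeviceList("5,3")
 *       .build();
 */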
public static final int POLLING_ACTIVE_DELAY_USECS_FIELD_NUMBER = 6;
private int pollingActiveDelayUsecs_;
/**
*
* In the event polling loop, sleep this many microseconds between
* PollEvents calls when the queue is not empty. If this value is not
* set or is set to 0, it is replaced with a non-zero default.
*
*
* int32 polling_active_delay_usecs = 6;
*/
public int getPollingActiveDelayUsecs() {
return pollingActiveDelayUsecs_;
}
public static final int POLLING_INACTIVE_DELAY_MSECS_FIELD_NUMBER = 7;
private int pollingInactiveDelayMsecs_;
/**
*
* In the event polling loop, sleep this many milliseconds between
* PollEvents calls when the queue is empty. If this value is not
* set or is set to 0, it is replaced with a non-zero default.
*
*
* int32 polling_inactive_delay_msecs = 7;
*/
public int getPollingInactiveDelayMsecs() {
return pollingInactiveDelayMsecs_;
}
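/*
 * Editor's example, not generated code: tuning both polling delays together;
 * the 100 us / 10 ms values are illustrative assumptions (leaving either at
 * 0 selects the non-zero default described above).
 *
 *   GPUOptions opts = GPUOptions.newBuilder()
 *       .setPollingActiveDelayUsecs(100)   // sleep while the queue is busy
 *       .setPollingInactiveDelayMsecs(10)  // sleep while the queue is empty
 *       .build();
 */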
public static final int FORCE_GPU_COMPATIBLE_FIELD_NUMBER = 8;
private boolean forceGpuCompatible_;
/**
*
* Force all tensors to be gpu_compatible. On a GPU-enabled TensorFlow,
* enabling this option forces all CPU tensors to be allocated with CUDA
* pinned memory. Normally, TensorFlow will infer which tensors should be
* allocated as pinned memory. But in cases where the inference is
* incomplete, this option can significantly speed up cross-device memory
* copy performance, as long as the data fits in memory.
* Note that this option should not be enabled by default for unknown or
* very large models, since all CUDA pinned memory is unpageable; having
* too much pinned memory might negatively impact overall host system
* performance.
*
*
* bool force_gpu_compatible = 8;
*/
public boolean getForceGpuCompatible() {
return forceGpuCompatible_;
}
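/*
 * Editor's example, not generated code: forcing CUDA-pinned host allocations
 * for a model known to be small enough to fit, per the caveats above.
 *
 *   GPUOptions opts = GPUOptions.newBuilder()
 *       .setForceGpuCompatible(true)
 *       .build();
 */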
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (perProcessGpuMemoryFraction_ != 0D) {
output.writeDouble(1, perProcessGpuMemoryFraction_);
}
if (!getAllocatorTypeBytes().isEmpty()) {
com.google.protobuf.GeneratedMessageV3.writeString(output, 2, allocatorType_);
}
if (deferredDeletionBytes_ != 0L) {
output.writeInt64(3, deferredDeletionBytes_);
}
if (allowGrowth_ != false) {
output.writeBool(4, allowGrowth_);
}
if (!getVisibleDeviceListBytes().isEmpty()) {
com.google.protobuf.GeneratedMessageV3.writeString(output, 5, visibleDeviceList_);
}
if (pollingActiveDelayUsecs_ != 0) {
output.writeInt32(6, pollingActiveDelayUsecs_);
}
if (pollingInactiveDelayMsecs_ != 0) {
output.writeInt32(7, pollingInactiveDelayMsecs_);
}
if (forceGpuCompatible_ != false) {
output.writeBool(8, forceGpuCompatible_);
}
}
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (perProcessGpuMemoryFraction_ != 0D) {
size += com.google.protobuf.CodedOutputStream
.computeDoubleSize(1, perProcessGpuMemoryFraction_);
}
if (!getAllocatorTypeBytes().isEmpty()) {
size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, allocatorType_);
}
if (deferredDeletionBytes_ != 0L) {
size += com.google.protobuf.CodedOutputStream
.computeInt64Size(3, deferredDeletionBytes_);
}
if (allowGrowth_ != false) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(4, allowGrowth_);
}
if (!getVisibleDeviceListBytes().isEmpty()) {
size += com.google.protobuf.GeneratedMessageV3.computeStringSize(5, visibleDeviceList_);
}
if (pollingActiveDelayUsecs_ != 0) {
size += com.google.protobuf.CodedOutputStream
.computeInt32Size(6, pollingActiveDelayUsecs_);
}
if (pollingInactiveDelayMsecs_ != 0) {
size += com.google.protobuf.CodedOutputStream
.computeInt32Size(7, pollingInactiveDelayMsecs_);
}
if (forceGpuCompatible_ != false) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(8, forceGpuCompatible_);
}
memoizedSize = size;
return size;
}
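/*
 * Editor's example, not generated code: writeTo and getSerializedSize above
 * emit only non-default fields (proto3 semantics). A sketch of a byte[]
 * round trip using the standard helpers inherited from the protobuf base
 * classes:
 *
 *   GPUOptions opts = GPUOptions.newBuilder().setAllowGrowth(true).build();
 *   byte[] wire = opts.toByteArray();              // encodes field 4 only
 *   GPUOptions copy = GPUOptions.parseFrom(wire);  // copy.getAllowGrowth() == true
 */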
private static final long serialVersionUID = 0L;
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.tensorflow.framework.GPUOptions)) {
return super.equals(obj);
}
org.tensorflow.framework.GPUOptions other = (org.tensorflow.framework.GPUOptions) obj;
boolean result = true;
result = result && (
java.lang.Double.doubleToLongBits(getPerProcessGpuMemoryFraction())
== java.lang.Double.doubleToLongBits(
other.getPerProcessGpuMemoryFraction()));
result = result && getAllocatorType()
.equals(other.getAllocatorType());
result = result && (getDeferredDeletionBytes()
== other.getDeferredDeletionBytes());
result = result && (getAllowGrowth()
== other.getAllowGrowth());
result = result && getVisibleDeviceList()
.equals(other.getVisibleDeviceList());
result = result && (getPollingActiveDelayUsecs()
== other.getPollingActiveDelayUsecs());
result = result && (getPollingInactiveDelayMsecs()
== other.getPollingInactiveDelayMsecs());
result = result && (getForceGpuCompatible()
== other.getForceGpuCompatible());
return result;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
hash = (37 * hash) + PER_PROCESS_GPU_MEMORY_FRACTION_FIELD_NUMBER;
hash = (53 * hash) + com.google.protobuf.Internal.hashLong(
java.lang.Double.doubleToLongBits(getPerProcessGpuMemoryFraction()));
hash = (37 * hash) + ALLOCATOR_TYPE_FIELD_NUMBER;
hash = (53 * hash) + getAllocatorType().hashCode();
hash = (37 * hash) + DEFERRED_DELETION_BYTES_FIELD_NUMBER;
hash = (53 * hash) + com.google.protobuf.Internal.hashLong(
getDeferredDeletionBytes());
hash = (37 * hash) + ALLOW_GROWTH_FIELD_NUMBER;
hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(
getAllowGrowth());
hash = (37 * hash) + VISIBLE_DEVICE_LIST_FIELD_NUMBER;
hash = (53 * hash) + getVisibleDeviceList().hashCode();
hash = (37 * hash) + POLLING_ACTIVE_DELAY_USECS_FIELD_NUMBER;
hash = (53 * hash) + getPollingActiveDelayUsecs();
hash = (37 * hash) + POLLING_INACTIVE_DELAY_MSECS_FIELD_NUMBER;
hash = (53 * hash) + getPollingInactiveDelayMsecs();
hash = (37 * hash) + FORCE_GPU_COMPATIBLE_FIELD_NUMBER;
hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(
getForceGpuCompatible());
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.tensorflow.framework.GPUOptions parseFrom(
java.nio.ByteBuffer data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.tensorflow.framework.GPUOptions parseFrom(
java.nio.ByteBuffer data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.tensorflow.framework.GPUOptions parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.tensorflow.framework.GPUOptions parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.tensorflow.framework.GPUOptions parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.tensorflow.framework.GPUOptions parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.tensorflow.framework.GPUOptions parseFrom(java.io.InputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.tensorflow.framework.GPUOptions parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.tensorflow.framework.GPUOptions parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.tensorflow.framework.GPUOptions parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.tensorflow.framework.GPUOptions parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.tensorflow.framework.GPUOptions parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
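/*
 * Editor's example, not generated code: the overloads above accept
 * ByteBuffer, ByteString, byte[], InputStream, and CodedInputStream sources.
 * A stream-parsing sketch; "gpu_options.pb" is a hypothetical path.
 *
 *   try (java.io.InputStream in = new java.io.FileInputStream("gpu_options.pb")) {
 *     GPUOptions opts = GPUOptions.parseFrom(in);
 *   }
 */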
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.tensorflow.framework.GPUOptions prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
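/*
 * Editor's example, not generated code: messages are immutable, so
 * toBuilder() is the idiomatic way to derive a modified copy; baseOpts here
 * is assumed to be an existing GPUOptions instance.
 *
 *   GPUOptions tuned = baseOpts.toBuilder()
 *       .setAllowGrowth(true)
 *       .build();
 */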
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code tensorflow.GPUOptions}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:tensorflow.GPUOptions)
org.tensorflow.framework.GPUOptionsOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_GPUOptions_descriptor;
}
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_GPUOptions_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.tensorflow.framework.GPUOptions.class, org.tensorflow.framework.GPUOptions.Builder.class);
}
// Construct using org.tensorflow.framework.GPUOptions.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
}
}
public Builder clear() {
super.clear();
perProcessGpuMemoryFraction_ = 0D;
allocatorType_ = "";
deferredDeletionBytes_ = 0L;
allowGrowth_ = false;
visibleDeviceList_ = "";
pollingActiveDelayUsecs_ = 0;
pollingInactiveDelayMsecs_ = 0;
forceGpuCompatible_ = false;
return this;
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_GPUOptions_descriptor;
}
public org.tensorflow.framework.GPUOptions getDefaultInstanceForType() {
return org.tensorflow.framework.GPUOptions.getDefaultInstance();
}
public org.tensorflow.framework.GPUOptions build() {
org.tensorflow.framework.GPUOptions result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.tensorflow.framework.GPUOptions buildPartial() {
org.tensorflow.framework.GPUOptions result = new org.tensorflow.framework.GPUOptions(this);
result.perProcessGpuMemoryFraction_ = perProcessGpuMemoryFraction_;
result.allocatorType_ = allocatorType_;
result.deferredDeletionBytes_ = deferredDeletionBytes_;
result.allowGrowth_ = allowGrowth_;
result.visibleDeviceList_ = visibleDeviceList_;
result.pollingActiveDelayUsecs_ = pollingActiveDelayUsecs_;
result.pollingInactiveDelayMsecs_ = pollingInactiveDelayMsecs_;
result.forceGpuCompatible_ = forceGpuCompatible_;
onBuilt();
return result;
}
public Builder clone() {
return (Builder) super.clone();
}
public Builder setField(
com.google.protobuf.Descriptors.FieldDescriptor field,
Object value) {
return (Builder) super.setField(field, value);
}
public Builder clearField(
com.google.protobuf.Descriptors.FieldDescriptor field) {
return (Builder) super.clearField(field);
}
public Builder clearOneof(
com.google.protobuf.Descriptors.OneofDescriptor oneof) {
return (Builder) super.clearOneof(oneof);
}
public Builder setRepeatedField(
com.google.protobuf.Descriptors.FieldDescriptor field,
int index, Object value) {
return (Builder) super.setRepeatedField(field, index, value);
}
public Builder addRepeatedField(
com.google.protobuf.Descriptors.FieldDescriptor field,
Object value) {
return (Builder) super.addRepeatedField(field, value);
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.tensorflow.framework.GPUOptions) {
return mergeFrom((org.tensorflow.framework.GPUOptions)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.tensorflow.framework.GPUOptions other) {
if (other == org.tensorflow.framework.GPUOptions.getDefaultInstance()) return this;
if (other.getPerProcessGpuMemoryFraction() != 0D) {
setPerProcessGpuMemoryFraction(other.getPerProcessGpuMemoryFraction());
}
if (!other.getAllocatorType().isEmpty()) {
allocatorType_ = other.allocatorType_;
onChanged();
}
if (other.getDeferredDeletionBytes() != 0L) {
setDeferredDeletionBytes(other.getDeferredDeletionBytes());
}
if (other.getAllowGrowth() != false) {
setAllowGrowth(other.getAllowGrowth());
}
if (!other.getVisibleDeviceList().isEmpty()) {
visibleDeviceList_ = other.visibleDeviceList_;
onChanged();
}
if (other.getPollingActiveDelayUsecs() != 0) {
setPollingActiveDelayUsecs(other.getPollingActiveDelayUsecs());
}
if (other.getPollingInactiveDelayMsecs() != 0) {
setPollingInactiveDelayMsecs(other.getPollingInactiveDelayMsecs());
}
if (other.getForceGpuCompatible() != false) {
setForceGpuCompatible(other.getForceGpuCompatible());
}
onChanged();
return this;
}
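/*
 * Editor's example, not generated code: as the checks above show, mergeFrom
 * copies only fields holding non-default values in `other`, so defaults in
 * one message never clobber settings in the other.
 *
 *   GPUOptions a = GPUOptions.newBuilder().setAllowGrowth(true).build();
 *   GPUOptions b = GPUOptions.newBuilder().setVisibleDeviceList("0").build();
 *   GPUOptions merged = a.toBuilder().mergeFrom(b).build();
 *   // merged: allowGrowth == true, visibleDeviceList == "0"
 */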
public final boolean isInitialized() {
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.tensorflow.framework.GPUOptions parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.tensorflow.framework.GPUOptions) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private double perProcessGpuMemoryFraction_ ;
/**
*
* A value between 0 and 1 that indicates what fraction of the
* available GPU memory to pre-allocate for each process. 1 means
* to pre-allocate all of the GPU memory, 0.5 means the process
* allocates ~50% of the available GPU memory.
*
*
* double per_process_gpu_memory_fraction = 1;
*/
public double getPerProcessGpuMemoryFraction() {
return perProcessGpuMemoryFraction_;
}
/**
*
* A value between 0 and 1 that indicates what fraction of the
* available GPU memory to pre-allocate for each process. 1 means
* to pre-allocate all of the GPU memory, 0.5 means the process
* allocates ~50% of the available GPU memory.
*
*
* double per_process_gpu_memory_fraction = 1;
*/
public Builder setPerProcessGpuMemoryFraction(double value) {
perProcessGpuMemoryFraction_ = value;
onChanged();
return this;
}
/**
*
* A value between 0 and 1 that indicates what fraction of the
* available GPU memory to pre-allocate for each process. 1 means
* to pre-allocate all of the GPU memory, 0.5 means the process
* allocates ~50% of the available GPU memory.
*
*
* double per_process_gpu_memory_fraction = 1;
*/
public Builder clearPerProcessGpuMemoryFraction() {
perProcessGpuMemoryFraction_ = 0D;
onChanged();
return this;
}
private java.lang.Object allocatorType_ = "";
/**
*
* The type of GPU allocation strategy to use.
* Allowed values:
* "": The empty string (default) uses a system-chosen default
* which may change over time.
* "BFC": A "Best-fit with coalescing" algorithm, simplified from a
* version of dlmalloc.
*
*
* string allocator_type = 2;
*/
public java.lang.String getAllocatorType() {
java.lang.Object ref = allocatorType_;
if (!(ref instanceof java.lang.String)) {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
allocatorType_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
*
* The type of GPU allocation strategy to use.
* Allowed values:
* "": The empty string (default) uses a system-chosen default
* which may change over time.
* "BFC": A "Best-fit with coalescing" algorithm, simplified from a
* version of dlmalloc.
*
*
* string allocator_type = 2;
*/
public com.google.protobuf.ByteString
getAllocatorTypeBytes() {
java.lang.Object ref = allocatorType_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
allocatorType_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
*
* The type of GPU allocation strategy to use.
* Allowed values:
* "": The empty string (default) uses a system-chosen default
* which may change over time.
* "BFC": A "Best-fit with coalescing" algorithm, simplified from a
* version of dlmalloc.
*
*
* string allocator_type = 2;
*/
public Builder setAllocatorType(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
allocatorType_ = value;
onChanged();
return this;
}
/**
*
* The type of GPU allocation strategy to use.
* Allowed values:
* "": The empty string (default) uses a system-chosen default
* which may change over time.
* "BFC": A "Best-fit with coalescing" algorithm, simplified from a
* version of dlmalloc.
*
*
* string allocator_type = 2;
*/
public Builder clearAllocatorType() {
allocatorType_ = getDefaultInstance().getAllocatorType();
onChanged();
return this;
}
/**
*
* The type of GPU allocation strategy to use.
* Allowed values:
* "": The empty string (default) uses a system-chosen default
* which may change over time.
* "BFC": A "Best-fit with coalescing" algorithm, simplified from a
* version of dlmalloc.
*
*
* string allocator_type = 2;
*/
public Builder setAllocatorTypeBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
checkByteStringIsUtf8(value);
allocatorType_ = value;
onChanged();
return this;
}
private long deferredDeletionBytes_ ;
/**
*
* Delay deletion of up to this many bytes to reduce the number of
* interactions with GPU driver code. If 0, the system chooses
* a reasonable default (several MBs).
*
*
* int64 deferred_deletion_bytes = 3;
*/
public long getDeferredDeletionBytes() {
return deferredDeletionBytes_;
}
/**
*
* Delay deletion of up to this many bytes to reduce the number of
* interactions with GPU driver code. If 0, the system chooses
* a reasonable default (several MBs).
*
*
* int64 deferred_deletion_bytes = 3;
*/
public Builder setDeferredDeletionBytes(long value) {
deferredDeletionBytes_ = value;
onChanged();
return this;
}
/**
*
* Delay deletion of up to this many bytes to reduce the number of
* interactions with GPU driver code. If 0, the system chooses
* a reasonable default (several MBs).
*
*
* int64 deferred_deletion_bytes = 3;
*/
public Builder clearDeferredDeletionBytes() {
deferredDeletionBytes_ = 0L;
onChanged();
return this;
}
private boolean allowGrowth_ ;
/**
*
* If true, the allocator does not pre-allocate the entire specified
* GPU memory region, instead starting small and growing as needed.
*
*
* bool allow_growth = 4;
*/
public boolean getAllowGrowth() {
return allowGrowth_;
}
/**
*
* If true, the allocator does not pre-allocate the entire specified
* GPU memory region, instead starting small and growing as needed.
*
*
* bool allow_growth = 4;
*/
public Builder setAllowGrowth(boolean value) {
allowGrowth_ = value;
onChanged();
return this;
}
/**
*
* If true, the allocator does not pre-allocate the entire specified
* GPU memory region, instead starting small and growing as needed.
*
*
* bool allow_growth = 4;
*/
public Builder clearAllowGrowth() {
allowGrowth_ = false;
onChanged();
return this;
}
private java.lang.Object visibleDeviceList_ = "";
/**
*
* A comma-separated list of GPU ids that determines the 'visible'
* to 'virtual' mapping of GPU devices. For example, if TensorFlow
* can see 8 GPU devices in the process, and one wanted to map
* visible GPU devices 5 and 3 as "/device:GPU:0" and "/device:GPU:1", then one
* would specify this field as "5,3". This field is similar in
* spirit to the CUDA_VISIBLE_DEVICES environment variable, except
* it applies to the visible GPU devices in the process.
* NOTE: The GPU driver provides the process with the visible GPUs
* in an order which is not guaranteed to have any correlation to
* the *physical* GPU id in the machine. This field is used for
* remapping "visible" to "virtual", which means this operates only
* after the process starts. Users are required to use vendor
* specific mechanisms (e.g., CUDA_VISIBLE_DEVICES) to control the
* physical to visible device mapping prior to invoking TensorFlow.
*
*
* string visible_device_list = 5;
*/
public java.lang.String getVisibleDeviceList() {
java.lang.Object ref = visibleDeviceList_;
if (!(ref instanceof java.lang.String)) {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
visibleDeviceList_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
*
* A comma-separated list of GPU ids that determines the 'visible'
* to 'virtual' mapping of GPU devices. For example, if TensorFlow
* can see 8 GPU devices in the process, and one wanted to map
* visible GPU devices 5 and 3 as "/device:GPU:0" and "/device:GPU:1", then one
* would specify this field as "5,3". This field is similar in
* spirit to the CUDA_VISIBLE_DEVICES environment variable, except
* it applies to the visible GPU devices in the process.
* NOTE: The GPU driver provides the process with the visible GPUs
* in an order which is not guaranteed to have any correlation to
* the *physical* GPU id in the machine. This field is used for
* remapping "visible" to "virtual", which means this operates only
* after the process starts. Users are required to use vendor
* specific mechanisms (e.g., CUDA_VISIBLE_DEVICES) to control the
* physical to visible device mapping prior to invoking TensorFlow.
*
*
* string visible_device_list = 5;
*/
public com.google.protobuf.ByteString
getVisibleDeviceListBytes() {
java.lang.Object ref = visibleDeviceList_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
visibleDeviceList_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
*
* A comma-separated list of GPU ids that determines the 'visible'
* to 'virtual' mapping of GPU devices. For example, if TensorFlow
* can see 8 GPU devices in the process, and one wanted to map
* visible GPU devices 5 and 3 as "/device:GPU:0" and "/device:GPU:1", then one
* would specify this field as "5,3". This field is similar in
* spirit to the CUDA_VISIBLE_DEVICES environment variable, except
* it applies to the visible GPU devices in the process.
* NOTE: The GPU driver provides the process with the visible GPUs
* in an order which is not guaranteed to have any correlation to
* the *physical* GPU id in the machine. This field is used for
* remapping "visible" to "virtual", which means this operates only
* after the process starts. Users are required to use vendor
* specific mechanisms (e.g., CUDA_VISIBLE_DEVICES) to control the
* physical to visible device mapping prior to invoking TensorFlow.
*
*
* string visible_device_list = 5;
*/
public Builder setVisibleDeviceList(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
visibleDeviceList_ = value;
onChanged();
return this;
}
/**
*
* A comma-separated list of GPU ids that determines the 'visible'
* to 'virtual' mapping of GPU devices. For example, if TensorFlow
* can see 8 GPU devices in the process, and one wanted to map
* visible GPU devices 5 and 3 as "/device:GPU:0" and "/device:GPU:1", then one
* would specify this field as "5,3". This field is similar in
* spirit to the CUDA_VISIBLE_DEVICES environment variable, except
* it applies to the visible GPU devices in the process.
* NOTE: The GPU driver provides the process with the visible GPUs
* in an order which is not guaranteed to have any correlation to
* the *physical* GPU id in the machine. This field is used for
* remapping "visible" to "virtual", which means this operates only
* after the process starts. Users are required to use vendor
* specific mechanisms (e.g., CUDA_VISIBLE_DEVICES) to control the
* physical to visible device mapping prior to invoking TensorFlow.
*
*
* string visible_device_list = 5;
*/
public Builder clearVisibleDeviceList() {
visibleDeviceList_ = getDefaultInstance().getVisibleDeviceList();
onChanged();
return this;
}
/**
*
* A comma-separated list of GPU ids that determines the 'visible'
* to 'virtual' mapping of GPU devices. For example, if TensorFlow
* can see 8 GPU devices in the process, and one wanted to map
* visible GPU devices 5 and 3 as "/device:GPU:0" and "/device:GPU:1", then one
* would specify this field as "5,3". This field is similar in
* spirit to the CUDA_VISIBLE_DEVICES environment variable, except
* it applies to the visible GPU devices in the process.
* NOTE: The GPU driver provides the process with the visible GPUs
* in an order which is not guaranteed to have any correlation to
* the *physical* GPU id in the machine. This field is used for
* remapping "visible" to "virtual", which means this operates only
* after the process starts. Users are required to use vendor
* specific mechanisms (e.g., CUDA_VISIBLE_DEVICES) to control the
* physical to visible device mapping prior to invoking TensorFlow.
*
*
* string visible_device_list = 5;
*/
public Builder setVisibleDeviceListBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
checkByteStringIsUtf8(value);
visibleDeviceList_ = value;
onChanged();
return this;
}
private int pollingActiveDelayUsecs_ ;
/**
*
* In the event polling loop, sleep this many microseconds between
* PollEvents calls when the queue is not empty. If this value is not
* set or is set to 0, it is replaced with a non-zero default.
*
*
* int32 polling_active_delay_usecs = 6;
*/
public int getPollingActiveDelayUsecs() {
return pollingActiveDelayUsecs_;
}
/**
*
* In the event polling loop, sleep this many microseconds between
* PollEvents calls when the queue is not empty. If this value is not
* set or is set to 0, it is replaced with a non-zero default.
*
*
* int32 polling_active_delay_usecs = 6;
*/
public Builder setPollingActiveDelayUsecs(int value) {
pollingActiveDelayUsecs_ = value;
onChanged();
return this;
}
/**
*
* In the event polling loop, sleep this many microseconds between
* PollEvents calls when the queue is not empty. If this value is not
* set or is set to 0, it is replaced with a non-zero default.
*
*
* int32 polling_active_delay_usecs = 6;
*/
public Builder clearPollingActiveDelayUsecs() {
pollingActiveDelayUsecs_ = 0;
onChanged();
return this;
}
private int pollingInactiveDelayMsecs_ ;
/**
*
* In the event polling loop, sleep this many milliseconds between
* PollEvents calls when the queue is empty. If this value is not
* set or is set to 0, it is replaced with a non-zero default.
*
*
* int32 polling_inactive_delay_msecs = 7;
*/
public int getPollingInactiveDelayMsecs() {
return pollingInactiveDelayMsecs_;
}
/**
*
* In the event polling loop, sleep this many milliseconds between
* PollEvents calls when the queue is empty. If this value is not
* set or is set to 0, it is replaced with a non-zero default.
*
*
* int32 polling_inactive_delay_msecs = 7;
*/
public Builder setPollingInactiveDelayMsecs(int value) {
pollingInactiveDelayMsecs_ = value;
onChanged();
return this;
}
/**
*
* In the event polling loop, sleep this many milliseconds between
* PollEvents calls when the queue is empty. If this value is not
* set or is set to 0, it is replaced with a non-zero default.
*
*
* int32 polling_inactive_delay_msecs = 7;
*/
public Builder clearPollingInactiveDelayMsecs() {
pollingInactiveDelayMsecs_ = 0;
onChanged();
return this;
}
private boolean forceGpuCompatible_ ;
/**
*
* Force all tensors to be gpu_compatible. On a GPU-enabled TensorFlow,
* enabling this option forces all CPU tensors to be allocated with CUDA
* pinned memory. Normally, TensorFlow will infer which tensors should be
* allocated as pinned memory. But in cases where the inference is
* incomplete, this option can significantly speed up cross-device memory
* copy performance, as long as the data fits in memory.
* Note that this option should not be enabled by default for unknown or
* very large models, since all CUDA pinned memory is unpageable; having
* too much pinned memory might negatively impact overall host system
* performance.
*
*
* bool force_gpu_compatible = 8;
*/
public boolean getForceGpuCompatible() {
return forceGpuCompatible_;
}
/**
*
* Force all tensors to be gpu_compatible. On a GPU-enabled TensorFlow,
* enabling this option forces all CPU tensors to be allocated with CUDA
* pinned memory. Normally, TensorFlow will infer which tensors should be
* allocated as pinned memory. But in cases where the inference is
* incomplete, this option can significantly speed up cross-device memory
* copy performance, as long as the data fits in memory.
* Note that this option should not be enabled by default for unknown or
* very large models, since all CUDA pinned memory is unpageable; having
* too much pinned memory might negatively impact overall host system
* performance.
*
*
* bool force_gpu_compatible = 8;
*/
public Builder setForceGpuCompatible(boolean value) {
forceGpuCompatible_ = value;
onChanged();
return this;
}
/**
*
* Force all tensors to be gpu_compatible. On a GPU-enabled TensorFlow,
* enabling this option forces all CPU tensors to be allocated with CUDA
* pinned memory. Normally, TensorFlow will infer which tensors should be
* allocated as pinned memory. But in cases where the inference is
* incomplete, this option can significantly speed up cross-device memory
* copy performance, as long as the data fits in memory.
* Note that this option should not be enabled by default for unknown or
* very large models, since all CUDA pinned memory is unpageable; having
* too much pinned memory might negatively impact overall host system
* performance.
*
*
* bool force_gpu_compatible = 8;
*/
public Builder clearForceGpuCompatible() {
forceGpuCompatible_ = false;
onChanged();
return this;
}
public final Builder setUnknownFields(
final com.google.protobuf.UnknownFieldSet unknownFields) {
return this;
}
public final Builder mergeUnknownFields(
final com.google.protobuf.UnknownFieldSet unknownFields) {
return this;
}
// @@protoc_insertion_point(builder_scope:tensorflow.GPUOptions)
}
// @@protoc_insertion_point(class_scope:tensorflow.GPUOptions)
private static final org.tensorflow.framework.GPUOptions DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.tensorflow.framework.GPUOptions();
}
public static org.tensorflow.framework.GPUOptions getDefaultInstance() {
return DEFAULT_INSTANCE;
}
private static final com.google.protobuf.Parser<GPUOptions>
PARSER = new com.google.protobuf.AbstractParser<GPUOptions>() {
public GPUOptions parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new GPUOptions(input, extensionRegistry);
}
};
public static com.google.protobuf.Parser<GPUOptions> parser() {
return PARSER;
}
@java.lang.Override
public com.google.protobuf.Parser<GPUOptions> getParserForType() {
return PARSER;
}
public org.tensorflow.framework.GPUOptions getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}