// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: tensorflow/core/protobuf/config.proto
package org.tensorflow.framework;
/**
* Protobuf type {@code tensorflow.GPUOptions}
*/
public final class GPUOptions extends
com.google.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:tensorflow.GPUOptions)
GPUOptionsOrBuilder {
private static final long serialVersionUID = 0L;
// Use GPUOptions.newBuilder() to construct.
private GPUOptions(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private GPUOptions() {
perProcessGpuMemoryFraction_ = 0D;
allowGrowth_ = false;
allocatorType_ = "";
deferredDeletionBytes_ = 0L;
visibleDeviceList_ = "";
pollingActiveDelayUsecs_ = 0;
pollingInactiveDelayMsecs_ = 0;
forceGpuCompatible_ = false;
}
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private GPUOptions(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 9: {
perProcessGpuMemoryFraction_ = input.readDouble();
break;
}
case 18: {
java.lang.String s = input.readStringRequireUtf8();
allocatorType_ = s;
break;
}
case 24: {
deferredDeletionBytes_ = input.readInt64();
break;
}
case 32: {
allowGrowth_ = input.readBool();
break;
}
case 42: {
java.lang.String s = input.readStringRequireUtf8();
visibleDeviceList_ = s;
break;
}
case 48: {
pollingActiveDelayUsecs_ = input.readInt32();
break;
}
case 56: {
pollingInactiveDelayMsecs_ = input.readInt32();
break;
}
case 64: {
forceGpuCompatible_ = input.readBool();
break;
}
case 74: {
org.tensorflow.framework.GPUOptions.Experimental.Builder subBuilder = null;
if (experimental_ != null) {
subBuilder = experimental_.toBuilder();
}
experimental_ = input.readMessage(org.tensorflow.framework.GPUOptions.Experimental.parser(), extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(experimental_);
experimental_ = subBuilder.buildPartial();
}
break;
}
default: {
if (!parseUnknownFieldProto3(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
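// Note: the tag values in the switch above follow the protobuf wire format,
// tag = (field_number << 3) | wire_type. For example, case 9 is field 1
// (per_process_gpu_memory_fraction) with wire type 1 (64-bit double),
// (1 << 3) | 1 = 9, and case 74 is field 9 (experimental) with wire type 2
// (length-delimited message), (9 << 3) | 2 = 74.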
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_GPUOptions_descriptor;
}
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_GPUOptions_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.tensorflow.framework.GPUOptions.class, org.tensorflow.framework.GPUOptions.Builder.class);
}
public interface ExperimentalOrBuilder extends
// @@protoc_insertion_point(interface_extends:tensorflow.GPUOptions.Experimental)
com.google.protobuf.MessageOrBuilder {
/**
*
* The multi virtual device settings. If empty (not set), it will create
* single virtual device on each visible GPU, according to the settings
* in "visible_device_list" above. Otherwise, the number of elements in the
* list must be the same as the number of visible GPUs (after
* "visible_device_list" filtering if it is set), and the string represented
* device names (e.g. /device:GPU:<id>) will refer to the virtual
* devices and have the <id> field assigned sequentially starting from 0,
* according to the order they appear in this list and the "memory_limit"
* list inside each element. For example,
* visible_device_list = "1,0"
* virtual_devices { memory_limit: 1GB memory_limit: 2GB }
* virtual_devices {}
* will create three virtual devices as:
* /device:GPU:0 -> visible GPU 1 with 1GB memory
* /device:GPU:1 -> visible GPU 1 with 2GB memory
* /device:GPU:2 -> visible GPU 0 with all available memory
* NOTE:
* 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
* at the same time.
* 2. Currently this setting is per-process, not per-session. Using
* different settings in different sessions within same process will
* result in undefined behavior.
*
*
* repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
*/
java.util.List<org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices>
getVirtualDevicesList();
/**
*
* The multi virtual device settings. If empty (not set), it will create
* single virtual device on each visible GPU, according to the settings
* in "visible_device_list" above. Otherwise, the number of elements in the
* list must be the same as the number of visible GPUs (after
* "visible_device_list" filtering if it is set), and the string represented
* device names (e.g. /device:GPU:<id>) will refer to the virtual
* devices and have the <id> field assigned sequentially starting from 0,
* according to the order they appear in this list and the "memory_limit"
* list inside each element. For example,
* visible_device_list = "1,0"
* virtual_devices { memory_limit: 1GB memory_limit: 2GB }
* virtual_devices {}
* will create three virtual devices as:
* /device:GPU:0 -> visible GPU 1 with 1GB memory
* /device:GPU:1 -> visible GPU 1 with 2GB memory
* /device:GPU:2 -> visible GPU 0 with all available memory
* NOTE:
* 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
* at the same time.
* 2. Currently this setting is per-process, not per-session. Using
* different settings in different sessions within same process will
* result in undefined behavior.
*
*
* repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
*/
org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices getVirtualDevices(int index);
/**
*
* The multi virtual device settings. If empty (not set), it will create
* single virtual device on each visible GPU, according to the settings
* in "visible_device_list" above. Otherwise, the number of elements in the
* list must be the same as the number of visible GPUs (after
* "visible_device_list" filtering if it is set), and the string represented
* device names (e.g. /device:GPU:<id>) will refer to the virtual
* devices and have the <id> field assigned sequentially starting from 0,
* according to the order they appear in this list and the "memory_limit"
* list inside each element. For example,
* visible_device_list = "1,0"
* virtual_devices { memory_limit: 1GB memory_limit: 2GB }
* virtual_devices {}
* will create three virtual devices as:
* /device:GPU:0 -> visible GPU 1 with 1GB memory
* /device:GPU:1 -> visible GPU 1 with 2GB memory
* /device:GPU:2 -> visible GPU 0 with all available memory
* NOTE:
* 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
* at the same time.
* 2. Currently this setting is per-process, not per-session. Using
* different settings in different sessions within same process will
* result in undefined behavior.
*
*
* repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
*/
int getVirtualDevicesCount();
/**
*
* The multi virtual device settings. If empty (not set), it will create
* single virtual device on each visible GPU, according to the settings
* in "visible_device_list" above. Otherwise, the number of elements in the
* list must be the same as the number of visible GPUs (after
* "visible_device_list" filtering if it is set), and the string represented
* device names (e.g. /device:GPU:<id>) will refer to the virtual
* devices and have the <id> field assigned sequentially starting from 0,
* according to the order they appear in this list and the "memory_limit"
* list inside each element. For example,
* visible_device_list = "1,0"
* virtual_devices { memory_limit: 1GB memory_limit: 2GB }
* virtual_devices {}
* will create three virtual devices as:
* /device:GPU:0 -> visible GPU 1 with 1GB memory
* /device:GPU:1 -> visible GPU 1 with 2GB memory
* /device:GPU:2 -> visible GPU 0 with all available memory
* NOTE:
* 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
* at the same time.
* 2. Currently this setting is per-process, not per-session. Using
* different settings in different sessions within same process will
* result in undefined behavior.
*
*
* repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
*/
java.util.List<? extends org.tensorflow.framework.GPUOptions.Experimental.VirtualDevicesOrBuilder>
getVirtualDevicesOrBuilderList();
/**
*
* The multi virtual device settings. If empty (not set), it will create
* single virtual device on each visible GPU, according to the settings
* in "visible_device_list" above. Otherwise, the number of elements in the
* list must be the same as the number of visible GPUs (after
* "visible_device_list" filtering if it is set), and the string represented
* device names (e.g. /device:GPU:<id>) will refer to the virtual
* devices and have the <id> field assigned sequentially starting from 0,
* according to the order they appear in this list and the "memory_limit"
* list inside each element. For example,
* visible_device_list = "1,0"
* virtual_devices { memory_limit: 1GB memory_limit: 2GB }
* virtual_devices {}
* will create three virtual devices as:
* /device:GPU:0 -> visible GPU 1 with 1GB memory
* /device:GPU:1 -> visible GPU 1 with 2GB memory
* /device:GPU:2 -> visible GPU 0 with all available memory
* NOTE:
* 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
* at the same time.
* 2. Currently this setting is per-process, not per-session. Using
* different settings in different sessions within same process will
* result in undefined behavior.
*
*
* repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
*/
org.tensorflow.framework.GPUOptions.Experimental.VirtualDevicesOrBuilder getVirtualDevicesOrBuilder(
int index);
/**
*
* If true, uses CUDA unified memory for memory allocations. If
* per_process_gpu_memory_fraction option is greater than 1.0, then unified
* memory is used regardless of the value for this field. See comments for
* per_process_gpu_memory_fraction field for more details and requirements
* of the unified memory. This option is useful to oversubscribe memory if
* multiple processes are sharing a single GPU while individually using less
* than 1.0 per process memory fraction.
*
*
* bool use_unified_memory = 2;
*/
boolean getUseUnifiedMemory();
/**
*
* If > 1, the number of device-to-device copy streams to create
* for each GPUDevice. Default value is 0, which is automatically
* converted to 1.
*
*
* int32 num_dev_to_dev_copy_streams = 3;
*/
int getNumDevToDevCopyStreams();
}
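// A minimal usage sketch for the options described above: splitting visible
// GPU 1 into two virtual devices and leaving visible GPU 0 whole, mirroring
// the visible_device_list = "1,0" example in the field comments. The builder
// methods are the standard protoc-generated ones for these fields; wiring the
// resulting GPUOptions into a ConfigProto/session is assumed and not shown.
//
//   GPUOptions gpuOptions = GPUOptions.newBuilder()
//       .setVisibleDeviceList("1,0")
//       .setExperimental(GPUOptions.Experimental.newBuilder()
//           .addVirtualDevices(GPUOptions.Experimental.VirtualDevices.newBuilder()
//               .addMemoryLimitMb(1024f)    // -> /device:GPU:0 on visible GPU 1
//               .addMemoryLimitMb(2048f))   // -> /device:GPU:1 on visible GPU 1
//           .addVirtualDevices(GPUOptions.Experimental.VirtualDevices.newBuilder()))
//                                           // -> /device:GPU:2 on visible GPU 0
//       .build();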
/**
* Protobuf type {@code tensorflow.GPUOptions.Experimental}
*/
public static final class Experimental extends
com.google.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:tensorflow.GPUOptions.Experimental)
ExperimentalOrBuilder {
private static final long serialVersionUID = 0L;
// Use Experimental.newBuilder() to construct.
private Experimental(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private Experimental() {
virtualDevices_ = java.util.Collections.emptyList();
useUnifiedMemory_ = false;
numDevToDevCopyStreams_ = 0;
}
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private Experimental(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 10: {
if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
virtualDevices_ = new java.util.ArrayList<org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices>();
mutable_bitField0_ |= 0x00000001;
}
virtualDevices_.add(
input.readMessage(org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices.parser(), extensionRegistry));
break;
}
case 16: {
useUnifiedMemory_ = input.readBool();
break;
}
case 24: {
numDevToDevCopyStreams_ = input.readInt32();
break;
}
default: {
if (!parseUnknownFieldProto3(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
virtualDevices_ = java.util.Collections.unmodifiableList(virtualDevices_);
}
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_GPUOptions_Experimental_descriptor;
}
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_GPUOptions_Experimental_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.tensorflow.framework.GPUOptions.Experimental.class, org.tensorflow.framework.GPUOptions.Experimental.Builder.class);
}
public interface VirtualDevicesOrBuilder extends
// @@protoc_insertion_point(interface_extends:tensorflow.GPUOptions.Experimental.VirtualDevices)
com.google.protobuf.MessageOrBuilder {
/**
*
* Per "virtual" device memory limit, in MB. The number of elements in
* the list is the number of virtual devices to create on the
* corresponding visible GPU (see "virtual_devices" below).
* If empty, it will create single virtual device taking all available
* memory from the device.
* For the concept of "visible" and "virtual" GPU, see the comments for
* "visible_device_list" above for more information.
*
*
* repeated float memory_limit_mb = 1;
*/
java.util.List<java.lang.Float> getMemoryLimitMbList();
/**
*
* Per "virtual" device memory limit, in MB. The number of elements in
* the list is the number of virtual devices to create on the
* corresponding visible GPU (see "virtual_devices" below).
* If empty, it will create single virtual device taking all available
* memory from the device.
* For the concept of "visible" and "virtual" GPU, see the comments for
* "visible_device_list" above for more information.
*
*
* repeated float memory_limit_mb = 1;
*/
int getMemoryLimitMbCount();
/**
*
* Per "virtual" device memory limit, in MB. The number of elements in
* the list is the number of virtual devices to create on the
* corresponding visible GPU (see "virtual_devices" below).
* If empty, it will create single virtual device taking all available
* memory from the device.
* For the concept of "visible" and "virtual" GPU, see the comments for
* "visible_device_list" above for more information.
*
*
* repeated float memory_limit_mb = 1;
*/
float getMemoryLimitMb(int index);
}
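// A minimal sketch of reading the limits back through this interface, assuming
// a VirtualDevices instance `vd` obtained from an existing GPUOptions.Experimental:
//
//   for (int i = 0; i < vd.getMemoryLimitMbCount(); i++) {
//     float limitMb = vd.getMemoryLimitMb(i);  // limit for virtual device i, in MB
//   }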
/**
*
* Configuration for breaking down a visible GPU into multiple "virtual"
* devices.
*
*
* Protobuf type {@code tensorflow.GPUOptions.Experimental.VirtualDevices}
*/
public static final class VirtualDevices extends
com.google.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:tensorflow.GPUOptions.Experimental.VirtualDevices)
VirtualDevicesOrBuilder {
private static final long serialVersionUID = 0L;
// Use VirtualDevices.newBuilder() to construct.
private VirtualDevices(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private VirtualDevices() {
memoryLimitMb_ = java.util.Collections.emptyList();
}
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private VirtualDevices(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 13: {
if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
memoryLimitMb_ = new java.util.ArrayList<java.lang.Float>();
mutable_bitField0_ |= 0x00000001;
}
memoryLimitMb_.add(input.readFloat());
break;
}
case 10: {
int length = input.readRawVarint32();
int limit = input.pushLimit(length);
if (!((mutable_bitField0_ & 0x00000001) == 0x00000001) && input.getBytesUntilLimit() > 0) {
memoryLimitMb_ = new java.util.ArrayList<java.lang.Float>();
mutable_bitField0_ |= 0x00000001;
}
while (input.getBytesUntilLimit() > 0) {
memoryLimitMb_.add(input.readFloat());
}
input.popLimit(limit);
break;
}
default: {
if (!parseUnknownFieldProto3(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
memoryLimitMb_ = java.util.Collections.unmodifiableList(memoryLimitMb_);
}
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
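// Note: the two non-default cases above cover both encodings of the repeated
// float field. Tag 13 is field 1 with wire type 5 (a single unpacked 32-bit
// float); tag 10 is field 1 with wire type 2 (a packed, length-delimited run
// of floats). Proto3 serializers normally emit the packed form, but the parser
// accepts either.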
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_GPUOptions_Experimental_VirtualDevices_descriptor;
}
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_GPUOptions_Experimental_VirtualDevices_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices.class, org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices.Builder.class);
}
public static final int MEMORY_LIMIT_MB_FIELD_NUMBER = 1;
private java.util.List<java.lang.Float> memoryLimitMb_;
/**
*
* Per "virtual" device memory limit, in MB. The number of elements in
* the list is the number of virtual devices to create on the
* corresponding visible GPU (see "virtual_devices" below).
* If empty, it will create single virtual device taking all available
* memory from the device.
* For the concept of "visible" and "virtual" GPU, see the comments for
* "visible_device_list" above for more information.
*
*
* repeated float memory_limit_mb = 1;
*/
public java.util.List<java.lang.Float>
getMemoryLimitMbList() {
return memoryLimitMb_;
}
/**
*
* Per "virtual" device memory limit, in MB. The number of elements in
* the list is the number of virtual devices to create on the
* corresponding visible GPU (see "virtual_devices" below).
* If empty, it will create single virtual device taking all available
* memory from the device.
* For the concept of "visible" and "virtual" GPU, see the comments for
* "visible_device_list" above for more information.
*
*
* repeated float memory_limit_mb = 1;
*/
public int getMemoryLimitMbCount() {
return memoryLimitMb_.size();
}
/**
*
* Per "virtual" device memory limit, in MB. The number of elements in
* the list is the number of virtual devices to create on the
* corresponding visible GPU (see "virtual_devices" below).
* If empty, it will create single virtual device taking all available
* memory from the device.
* For the concept of "visible" and "virtual" GPU, see the comments for
* "visible_device_list" above for more information.
*
*
* repeated float memory_limit_mb = 1;
*/
public float getMemoryLimitMb(int index) {
return memoryLimitMb_.get(index);
}
private int memoryLimitMbMemoizedSerializedSize = -1;
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (getMemoryLimitMbList().size() > 0) {
output.writeUInt32NoTag(10);
output.writeUInt32NoTag(memoryLimitMbMemoizedSerializedSize);
}
for (int i = 0; i < memoryLimitMb_.size(); i++) {
output.writeFloatNoTag(memoryLimitMb_.get(i));
}
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
{
int dataSize = 0;
dataSize = 4 * getMemoryLimitMbList().size();
size += dataSize;
if (!getMemoryLimitMbList().isEmpty()) {
size += 1;
size += com.google.protobuf.CodedOutputStream
.computeInt32SizeNoTag(dataSize);
}
memoryLimitMbMemoizedSerializedSize = dataSize;
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
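// Note: for the packed memory_limit_mb field the payload is exactly 4 bytes per
// float (dataSize above); the additional "size += 1" accounts for the single tag
// byte (10) and computeInt32SizeNoTag(dataSize) for the varint length prefix
// that writeTo(...) emits ahead of the packed run.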
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices)) {
return super.equals(obj);
}
org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices other = (org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices) obj;
boolean result = true;
result = result && getMemoryLimitMbList()
.equals(other.getMemoryLimitMbList());
result = result && unknownFields.equals(other.unknownFields);
return result;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (getMemoryLimitMbCount() > 0) {
hash = (37 * hash) + MEMORY_LIMIT_MB_FIELD_NUMBER;
hash = (53 * hash) + getMemoryLimitMbList().hashCode();
}
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices parseFrom(
java.nio.ByteBuffer data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices parseFrom(
java.nio.ByteBuffer data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices parseFrom(java.io.InputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
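// A minimal serialize/parse round trip using the overloads above, assuming an
// existing VirtualDevices instance `original` (toByteArray() comes from the
// protobuf message base class):
//
//   byte[] wire = original.toByteArray();
//   GPUOptions.Experimental.VirtualDevices copy =
//       GPUOptions.Experimental.VirtualDevices.parseFrom(wire);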
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
*
* Configuration for breaking down a visible GPU into multiple "virtual"
* devices.
*
*
* Protobuf type {@code tensorflow.GPUOptions.Experimental.VirtualDevices}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:tensorflow.GPUOptions.Experimental.VirtualDevices)
org.tensorflow.framework.GPUOptions.Experimental.VirtualDevicesOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_GPUOptions_Experimental_VirtualDevices_descriptor;
}
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_GPUOptions_Experimental_VirtualDevices_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices.class, org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices.Builder.class);
}
// Construct using org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
}
}
@java.lang.Override
public Builder clear() {
super.clear();
memoryLimitMb_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
@java.lang.Override
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_GPUOptions_Experimental_VirtualDevices_descriptor;
}
@java.lang.Override
public org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices getDefaultInstanceForType() {
return org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices.getDefaultInstance();
}
@java.lang.Override
public org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices build() {
org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices buildPartial() {
org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices result = new org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices(this);
int from_bitField0_ = bitField0_;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
memoryLimitMb_ = java.util.Collections.unmodifiableList(memoryLimitMb_);
bitField0_ = (bitField0_ & ~0x00000001);
}
result.memoryLimitMb_ = memoryLimitMb_;
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return (Builder) super.clone();
}
@java.lang.Override
public Builder setField(
com.google.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return (Builder) super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
com.google.protobuf.Descriptors.FieldDescriptor field) {
return (Builder) super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
com.google.protobuf.Descriptors.OneofDescriptor oneof) {
return (Builder) super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
com.google.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return (Builder) super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
com.google.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return (Builder) super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices) {
return mergeFrom((org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices other) {
if (other == org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices.getDefaultInstance()) return this;
if (!other.memoryLimitMb_.isEmpty()) {
if (memoryLimitMb_.isEmpty()) {
memoryLimitMb_ = other.memoryLimitMb_;
bitField0_ = (bitField0_ & ~0x00000001);
} else {
ensureMemoryLimitMbIsMutable();
memoryLimitMb_.addAll(other.memoryLimitMb_);
}
onChanged();
}
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
return true;
}
@java.lang.Override
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
private java.util.List<java.lang.Float> memoryLimitMb_ = java.util.Collections.emptyList();
private void ensureMemoryLimitMbIsMutable() {
if (!((bitField0_ & 0x00000001) == 0x00000001)) {
memoryLimitMb_ = new java.util.ArrayList<java.lang.Float>(memoryLimitMb_);
bitField0_ |= 0x00000001;
}
}
/**
*
* Per "virtual" device memory limit, in MB. The number of elements in
* the list is the number of virtual devices to create on the
* corresponding visible GPU (see "virtual_devices" below).
* If empty, it will create single virtual device taking all available
* memory from the device.
* For the concept of "visible" and "virtual" GPU, see the comments for
* "visible_device_list" above for more information.
*
*
* repeated float memory_limit_mb = 1;
*/
public java.util.List<java.lang.Float>
getMemoryLimitMbList() {
return java.util.Collections.unmodifiableList(memoryLimitMb_);
}
/**
*
* Per "virtual" device memory limit, in MB. The number of elements in
* the list is the number of virtual devices to create on the
* corresponding visible GPU (see "virtual_devices" below).
* If empty, it will create single virtual device taking all available
* memory from the device.
* For the concept of "visible" and "virtual" GPU, see the comments for
* "visible_device_list" above for more information.
*
*
* repeated float memory_limit_mb = 1;
*/
public int getMemoryLimitMbCount() {
return memoryLimitMb_.size();
}
/**
*
* Per "virtual" device memory limit, in MB. The number of elements in
* the list is the number of virtual devices to create on the
* corresponding visible GPU (see "virtual_devices" below).
* If empty, it will create single virtual device taking all available
* memory from the device.
* For the concept of "visible" and "virtual" GPU, see the comments for
* "visible_device_list" above for more information.
*
*
* repeated float memory_limit_mb = 1;
*/
public float getMemoryLimitMb(int index) {
return memoryLimitMb_.get(index);
}
/**
*
* Per "virtual" device memory limit, in MB. The number of elements in
* the list is the number of virtual devices to create on the
* corresponding visible GPU (see "virtual_devices" below).
* If empty, it will create single virtual device taking all available
* memory from the device.
* For the concept of "visible" and "virtual" GPU, see the comments for
* "visible_device_list" above for more information.
*
*
* repeated float memory_limit_mb = 1;
*/
public Builder setMemoryLimitMb(
int index, float value) {
ensureMemoryLimitMbIsMutable();
memoryLimitMb_.set(index, value);
onChanged();
return this;
}
/**
*
* Per "virtual" device memory limit, in MB. The number of elements in
* the list is the number of virtual devices to create on the
* corresponding visible GPU (see "virtual_devices" below).
* If empty, it will create single virtual device taking all available
* memory from the device.
* For the concept of "visible" and "virtual" GPU, see the comments for
* "visible_device_list" above for more information.
*
*
* repeated float memory_limit_mb = 1;
*/
public Builder addMemoryLimitMb(float value) {
ensureMemoryLimitMbIsMutable();
memoryLimitMb_.add(value);
onChanged();
return this;
}
/**
*
* Per "virtual" device memory limit, in MB. The number of elements in
* the list is the number of virtual devices to create on the
* corresponding visible GPU (see "virtual_devices" below).
* If empty, it will create single virtual device taking all available
* memory from the device.
* For the concept of "visible" and "virtual" GPU, see the comments for
* "visible_device_list" above for more information.
*
*
* repeated float memory_limit_mb = 1;
*/
public Builder addAllMemoryLimitMb(
java.lang.Iterable<? extends java.lang.Float> values) {
ensureMemoryLimitMbIsMutable();
com.google.protobuf.AbstractMessageLite.Builder.addAll(
values, memoryLimitMb_);
onChanged();
return this;
}
/**
*
* Per "virtual" device memory limit, in MB. The number of elements in
* the list is the number of virtual devices to create on the
* corresponding visible GPU (see "virtual_devices" below).
* If empty, it will create single virtual device taking all available
* memory from the device.
* For the concept of "visible" and "virtual" GPU, see the comments for
* "visible_device_list" above for more information.
*
*
* repeated float memory_limit_mb = 1;
*/
public Builder clearMemoryLimitMb() {
memoryLimitMb_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000001);
onChanged();
return this;
}
@java.lang.Override
public final Builder setUnknownFields(
final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFieldsProto3(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:tensorflow.GPUOptions.Experimental.VirtualDevices)
}
// @@protoc_insertion_point(class_scope:tensorflow.GPUOptions.Experimental.VirtualDevices)
private static final org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices();
}
public static org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices getDefaultInstance() {
return DEFAULT_INSTANCE;
}
private static final com.google.protobuf.Parser<VirtualDevices>
PARSER = new com.google.protobuf.AbstractParser<VirtualDevices>() {
@java.lang.Override
public VirtualDevices parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new VirtualDevices(input, extensionRegistry);
}
};
public static com.google.protobuf.Parser<VirtualDevices> parser() {
return PARSER;
}
@java.lang.Override
public com.google.protobuf.Parser<VirtualDevices> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
private int bitField0_;
public static final int VIRTUAL_DEVICES_FIELD_NUMBER = 1;
private java.util.List<org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices> virtualDevices_;
/**
*
* The multi virtual device settings. If empty (not set), it will create
* single virtual device on each visible GPU, according to the settings
* in "visible_device_list" above. Otherwise, the number of elements in the
* list must be the same as the number of visible GPUs (after
* "visible_device_list" filtering if it is set), and the string represented
* device names (e.g. /device:GPU:<id>) will refer to the virtual
* devices and have the <id> field assigned sequentially starting from 0,
* according to the order they appear in this list and the "memory_limit"
* list inside each element. For example,
* visible_device_list = "1,0"
* virtual_devices { memory_limit: 1GB memory_limit: 2GB }
* virtual_devices {}
* will create three virtual devices as:
* /device:GPU:0 -> visible GPU 1 with 1GB memory
* /device:GPU:1 -> visible GPU 1 with 2GB memory
* /device:GPU:2 -> visible GPU 0 with all available memory
* NOTE:
* 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
* at the same time.
* 2. Currently this setting is per-process, not per-session. Using
* different settings in different sessions within same process will
* result in undefined behavior.
*
*
* repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
*/
public java.util.List<org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices> getVirtualDevicesList() {
return virtualDevices_;
}
/**
*
* The multi virtual device settings. If empty (not set), it will create
* single virtual device on each visible GPU, according to the settings
* in "visible_device_list" above. Otherwise, the number of elements in the
* list must be the same as the number of visible GPUs (after
* "visible_device_list" filtering if it is set), and the string represented
* device names (e.g. /device:GPU:<id>) will refer to the virtual
* devices and have the <id> field assigned sequentially starting from 0,
* according to the order they appear in this list and the "memory_limit"
* list inside each element. For example,
* visible_device_list = "1,0"
* virtual_devices { memory_limit: 1GB memory_limit: 2GB }
* virtual_devices {}
* will create three virtual devices as:
* /device:GPU:0 -> visible GPU 1 with 1GB memory
* /device:GPU:1 -> visible GPU 1 with 2GB memory
* /device:GPU:2 -> visible GPU 0 with all available memory
* NOTE:
* 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
* at the same time.
* 2. Currently this setting is per-process, not per-session. Using
* different settings in different sessions within same process will
* result in undefined behavior.
*
*
* repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
*/
public java.util.List<? extends org.tensorflow.framework.GPUOptions.Experimental.VirtualDevicesOrBuilder>
getVirtualDevicesOrBuilderList() {
return virtualDevices_;
}
/**
*
* The multi virtual device settings. If empty (not set), it will create
* single virtual device on each visible GPU, according to the settings
* in "visible_device_list" above. Otherwise, the number of elements in the
* list must be the same as the number of visible GPUs (after
* "visible_device_list" filtering if it is set), and the string represented
* device names (e.g. /device:GPU:<id>) will refer to the virtual
* devices and have the <id> field assigned sequentially starting from 0,
* according to the order they appear in this list and the "memory_limit"
* list inside each element. For example,
* visible_device_list = "1,0"
* virtual_devices { memory_limit: 1GB memory_limit: 2GB }
* virtual_devices {}
* will create three virtual devices as:
* /device:GPU:0 -> visible GPU 1 with 1GB memory
* /device:GPU:1 -> visible GPU 1 with 2GB memory
* /device:GPU:2 -> visible GPU 0 with all available memory
* NOTE:
* 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
* at the same time.
* 2. Currently this setting is per-process, not per-session. Using
* different settings in different sessions within same process will
* result in undefined behavior.
*
*
* repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
*/
public int getVirtualDevicesCount() {
return virtualDevices_.size();
}
/**
*
* The multi virtual device settings. If empty (not set), it will create
* single virtual device on each visible GPU, according to the settings
* in "visible_device_list" above. Otherwise, the number of elements in the
* list must be the same as the number of visible GPUs (after
* "visible_device_list" filtering if it is set), and the string represented
* device names (e.g. /device:GPU:<id>) will refer to the virtual
* devices and have the <id> field assigned sequentially starting from 0,
* according to the order they appear in this list and the "memory_limit"
* list inside each element. For example,
* visible_device_list = "1,0"
* virtual_devices { memory_limit: 1GB memory_limit: 2GB }
* virtual_devices {}
* will create three virtual devices as:
* /device:GPU:0 -> visible GPU 1 with 1GB memory
* /device:GPU:1 -> visible GPU 1 with 2GB memory
* /device:GPU:2 -> visible GPU 0 with all available memory
* NOTE:
* 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
* at the same time.
* 2. Currently this setting is per-process, not per-session. Using
* different settings in different sessions within same process will
* result in undefined behavior.
*
*
* repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
*/
public org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices getVirtualDevices(int index) {
return virtualDevices_.get(index);
}
/**
*
* The multi virtual device settings. If empty (not set), it will create
* single virtual device on each visible GPU, according to the settings
* in "visible_device_list" above. Otherwise, the number of elements in the
* list must be the same as the number of visible GPUs (after
* "visible_device_list" filtering if it is set), and the string represented
* device names (e.g. /device:GPU:<id>) will refer to the virtual
* devices and have the <id> field assigned sequentially starting from 0,
* according to the order they appear in this list and the "memory_limit"
* list inside each element. For example,
* visible_device_list = "1,0"
* virtual_devices { memory_limit: 1GB memory_limit: 2GB }
* virtual_devices {}
* will create three virtual devices as:
* /device:GPU:0 -> visible GPU 1 with 1GB memory
* /device:GPU:1 -> visible GPU 1 with 2GB memory
* /device:GPU:2 -> visible GPU 0 with all available memory
* NOTE:
* 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
* at the same time.
* 2. Currently this setting is per-process, not per-session. Using
* different settings in different sessions within same process will
* result in undefined behavior.
*
*
* repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
*/
public org.tensorflow.framework.GPUOptions.Experimental.VirtualDevicesOrBuilder getVirtualDevicesOrBuilder(
int index) {
return virtualDevices_.get(index);
}
public static final int USE_UNIFIED_MEMORY_FIELD_NUMBER = 2;
private boolean useUnifiedMemory_;
/**
*
* If true, uses CUDA unified memory for memory allocations. If
* per_process_gpu_memory_fraction option is greater than 1.0, then unified
* memory is used regardless of the value for this field. See comments for
* per_process_gpu_memory_fraction field for more details and requirements
* of the unified memory. This option is useful to oversubscribe memory if
* multiple processes are sharing a single GPU while individually using less
* than 1.0 per process memory fraction.
*
*
* bool use_unified_memory = 2;
*/
public boolean getUseUnifiedMemory() {
return useUnifiedMemory_;
}
public static final int NUM_DEV_TO_DEV_COPY_STREAMS_FIELD_NUMBER = 3;
private int numDevToDevCopyStreams_;
/**
*
* If > 1, the number of device-to-device copy streams to create
* for each GPUDevice. Default value is 0, which is automatically
* converted to 1.
*
*
* int32 num_dev_to_dev_copy_streams = 3;
*/
public int getNumDevToDevCopyStreams() {
return numDevToDevCopyStreams_;
}
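// A minimal sketch of reading these experimental knobs at runtime, assuming a
// GPUOptions instance `gpuOptions` obtained elsewhere (e.g. parsed from a
// serialized ConfigProto):
//
//   GPUOptions.Experimental exp = gpuOptions.getExperimental();
//   boolean unified = exp.getUseUnifiedMemory();        // false unless explicitly enabled
//   int copyStreams = exp.getNumDevToDevCopyStreams();  // 0 means "use 1", per the comment above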
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
for (int i = 0; i < virtualDevices_.size(); i++) {
output.writeMessage(1, virtualDevices_.get(i));
}
if (useUnifiedMemory_ != false) {
output.writeBool(2, useUnifiedMemory_);
}
if (numDevToDevCopyStreams_ != 0) {
output.writeInt32(3, numDevToDevCopyStreams_);
}
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
for (int i = 0; i < virtualDevices_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, virtualDevices_.get(i));
}
if (useUnifiedMemory_ != false) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(2, useUnifiedMemory_);
}
if (numDevToDevCopyStreams_ != 0) {
size += com.google.protobuf.CodedOutputStream
.computeInt32Size(3, numDevToDevCopyStreams_);
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.tensorflow.framework.GPUOptions.Experimental)) {
return super.equals(obj);
}
org.tensorflow.framework.GPUOptions.Experimental other = (org.tensorflow.framework.GPUOptions.Experimental) obj;
boolean result = true;
result = result && getVirtualDevicesList()
.equals(other.getVirtualDevicesList());
result = result && (getUseUnifiedMemory()
== other.getUseUnifiedMemory());
result = result && (getNumDevToDevCopyStreams()
== other.getNumDevToDevCopyStreams());
result = result && unknownFields.equals(other.unknownFields);
return result;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (getVirtualDevicesCount() > 0) {
hash = (37 * hash) + VIRTUAL_DEVICES_FIELD_NUMBER;
hash = (53 * hash) + getVirtualDevicesList().hashCode();
}
hash = (37 * hash) + USE_UNIFIED_MEMORY_FIELD_NUMBER;
hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(
getUseUnifiedMemory());
hash = (37 * hash) + NUM_DEV_TO_DEV_COPY_STREAMS_FIELD_NUMBER;
hash = (53 * hash) + getNumDevToDevCopyStreams();
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.tensorflow.framework.GPUOptions.Experimental parseFrom(
java.nio.ByteBuffer data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.tensorflow.framework.GPUOptions.Experimental parseFrom(
java.nio.ByteBuffer data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.tensorflow.framework.GPUOptions.Experimental parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.tensorflow.framework.GPUOptions.Experimental parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.tensorflow.framework.GPUOptions.Experimental parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.tensorflow.framework.GPUOptions.Experimental parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.tensorflow.framework.GPUOptions.Experimental parseFrom(java.io.InputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.tensorflow.framework.GPUOptions.Experimental parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.tensorflow.framework.GPUOptions.Experimental parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.tensorflow.framework.GPUOptions.Experimental parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.tensorflow.framework.GPUOptions.Experimental parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.tensorflow.framework.GPUOptions.Experimental parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.tensorflow.framework.GPUOptions.Experimental prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code tensorflow.GPUOptions.Experimental}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:tensorflow.GPUOptions.Experimental)
org.tensorflow.framework.GPUOptions.ExperimentalOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_GPUOptions_Experimental_descriptor;
}
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_GPUOptions_Experimental_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.tensorflow.framework.GPUOptions.Experimental.class, org.tensorflow.framework.GPUOptions.Experimental.Builder.class);
}
// Construct using org.tensorflow.framework.GPUOptions.Experimental.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
getVirtualDevicesFieldBuilder();
}
}
@java.lang.Override
public Builder clear() {
super.clear();
if (virtualDevicesBuilder_ == null) {
virtualDevices_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000001);
} else {
virtualDevicesBuilder_.clear();
}
useUnifiedMemory_ = false;
numDevToDevCopyStreams_ = 0;
return this;
}
@java.lang.Override
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_GPUOptions_Experimental_descriptor;
}
@java.lang.Override
public org.tensorflow.framework.GPUOptions.Experimental getDefaultInstanceForType() {
return org.tensorflow.framework.GPUOptions.Experimental.getDefaultInstance();
}
@java.lang.Override
public org.tensorflow.framework.GPUOptions.Experimental build() {
org.tensorflow.framework.GPUOptions.Experimental result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.tensorflow.framework.GPUOptions.Experimental buildPartial() {
org.tensorflow.framework.GPUOptions.Experimental result = new org.tensorflow.framework.GPUOptions.Experimental(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (virtualDevicesBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001)) {
virtualDevices_ = java.util.Collections.unmodifiableList(virtualDevices_);
bitField0_ = (bitField0_ & ~0x00000001);
}
result.virtualDevices_ = virtualDevices_;
} else {
result.virtualDevices_ = virtualDevicesBuilder_.build();
}
result.useUnifiedMemory_ = useUnifiedMemory_;
result.numDevToDevCopyStreams_ = numDevToDevCopyStreams_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return (Builder) super.clone();
}
@java.lang.Override
public Builder setField(
com.google.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return (Builder) super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
com.google.protobuf.Descriptors.FieldDescriptor field) {
return (Builder) super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
com.google.protobuf.Descriptors.OneofDescriptor oneof) {
return (Builder) super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
com.google.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return (Builder) super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
com.google.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return (Builder) super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.tensorflow.framework.GPUOptions.Experimental) {
return mergeFrom((org.tensorflow.framework.GPUOptions.Experimental)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.tensorflow.framework.GPUOptions.Experimental other) {
if (other == org.tensorflow.framework.GPUOptions.Experimental.getDefaultInstance()) return this;
if (virtualDevicesBuilder_ == null) {
if (!other.virtualDevices_.isEmpty()) {
if (virtualDevices_.isEmpty()) {
virtualDevices_ = other.virtualDevices_;
bitField0_ = (bitField0_ & ~0x00000001);
} else {
ensureVirtualDevicesIsMutable();
virtualDevices_.addAll(other.virtualDevices_);
}
onChanged();
}
} else {
if (!other.virtualDevices_.isEmpty()) {
if (virtualDevicesBuilder_.isEmpty()) {
virtualDevicesBuilder_.dispose();
virtualDevicesBuilder_ = null;
virtualDevices_ = other.virtualDevices_;
bitField0_ = (bitField0_ & ~0x00000001);
virtualDevicesBuilder_ =
com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
getVirtualDevicesFieldBuilder() : null;
} else {
virtualDevicesBuilder_.addAllMessages(other.virtualDevices_);
}
}
}
if (other.getUseUnifiedMemory() != false) {
setUseUnifiedMemory(other.getUseUnifiedMemory());
}
if (other.getNumDevToDevCopyStreams() != 0) {
setNumDevToDevCopyStreams(other.getNumDevToDevCopyStreams());
}
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
return true;
}
@java.lang.Override
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.tensorflow.framework.GPUOptions.Experimental parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.tensorflow.framework.GPUOptions.Experimental) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
private java.util.List<org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices> virtualDevices_ =
java.util.Collections.emptyList();
private void ensureVirtualDevicesIsMutable() {
if (!((bitField0_ & 0x00000001) == 0x00000001)) {
virtualDevices_ = new java.util.ArrayList<org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices>(virtualDevices_);
bitField0_ |= 0x00000001;
}
}
private com.google.protobuf.RepeatedFieldBuilderV3<
org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices, org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices.Builder, org.tensorflow.framework.GPUOptions.Experimental.VirtualDevicesOrBuilder> virtualDevicesBuilder_;
/**
*
* The multi virtual device settings. If empty (not set), it will create
* single virtual device on each visible GPU, according to the settings
* in "visible_device_list" above. Otherwise, the number of elements in the
* list must be the same as the number of visible GPUs (after
* "visible_device_list" filtering if it is set), and the string represented
* device names (e.g. /device:GPU:<id>) will refer to the virtual
* devices and have the <id> field assigned sequentially starting from 0,
* according to the order they appear in this list and the "memory_limit"
* list inside each element. For example,
* visible_device_list = "1,0"
* virtual_devices { memory_limit: 1GB memory_limit: 2GB }
* virtual_devices {}
* will create three virtual devices as:
* /device:GPU:0 -> visible GPU 1 with 1GB memory
* /device:GPU:1 -> visible GPU 1 with 2GB memory
* /device:GPU:2 -> visible GPU 0 with all available memory
* NOTE:
* 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
* at the same time.
* 2. Currently this setting is per-process, not per-session. Using
* different settings in different sessions within same process will
* result in undefined behavior.
*
*
* repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
*/
public java.util.List<org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices> getVirtualDevicesList() {
if (virtualDevicesBuilder_ == null) {
return java.util.Collections.unmodifiableList(virtualDevices_);
} else {
return virtualDevicesBuilder_.getMessageList();
}
}
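// Illustrative usage sketch (not generated code) of the virtual-device mapping
// described in the comment above. It assumes the outer GPUOptions builder and a
// VirtualDevices.addMemoryLimitMb accessor; those names are taken from the
// config.proto comments and should be treated as assumptions, not as this API.
//
//   GPUOptions opts = GPUOptions.newBuilder()
//       .setVisibleDeviceList("1,0")
//       .setExperimental(GPUOptions.Experimental.newBuilder()
//           // visible GPU 1 -> /device:GPU:0 (1 GB) and /device:GPU:1 (2 GB)
//           .addVirtualDevices(GPUOptions.Experimental.VirtualDevices.newBuilder()
//               .addMemoryLimitMb(1024)
//               .addMemoryLimitMb(2048))
//           // visible GPU 0 -> /device:GPU:2 with all remaining memory
//           .addVirtualDevices(GPUOptions.Experimental.VirtualDevices.newBuilder()))
//       .build();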
/**
*
* The multi virtual device settings. If empty (not set), it will create
* single virtual device on each visible GPU, according to the settings
* in "visible_device_list" above. Otherwise, the number of elements in the
* list must be the same as the number of visible GPUs (after
* "visible_device_list" filtering if it is set), and the string represented
* device names (e.g. /device:GPU:<id>) will refer to the virtual
* devices and have the <id> field assigned sequentially starting from 0,
* according to the order they appear in this list and the "memory_limit"
* list inside each element. For example,
* visible_device_list = "1,0"
* virtual_devices { memory_limit: 1GB memory_limit: 2GB }
* virtual_devices {}
* will create three virtual devices as:
* /device:GPU:0 -> visible GPU 1 with 1GB memory
* /device:GPU:1 -> visible GPU 1 with 2GB memory
* /device:GPU:2 -> visible GPU 0 with all available memory
* NOTE:
* 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
* at the same time.
* 2. Currently this setting is per-process, not per-session. Using
* different settings in different sessions within same process will
* result in undefined behavior.
*
*
* repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
*/
public int getVirtualDevicesCount() {
if (virtualDevicesBuilder_ == null) {
return virtualDevices_.size();
} else {
return virtualDevicesBuilder_.getCount();
}
}
/**
*
* The multi virtual device settings. If empty (not set), it will create
* single virtual device on each visible GPU, according to the settings
* in "visible_device_list" above. Otherwise, the number of elements in the
* list must be the same as the number of visible GPUs (after
* "visible_device_list" filtering if it is set), and the string represented
* device names (e.g. /device:GPU:<id>) will refer to the virtual
* devices and have the <id> field assigned sequentially starting from 0,
* according to the order they appear in this list and the "memory_limit"
* list inside each element. For example,
* visible_device_list = "1,0"
* virtual_devices { memory_limit: 1GB memory_limit: 2GB }
* virtual_devices {}
* will create three virtual devices as:
* /device:GPU:0 -> visible GPU 1 with 1GB memory
* /device:GPU:1 -> visible GPU 1 with 2GB memory
* /device:GPU:2 -> visible GPU 0 with all available memory
* NOTE:
* 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
* at the same time.
* 2. Currently this setting is per-process, not per-session. Using
* different settings in different sessions within same process will
* result in undefined behavior.
*
*
* repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
*/
public org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices getVirtualDevices(int index) {
if (virtualDevicesBuilder_ == null) {
return virtualDevices_.get(index);
} else {
return virtualDevicesBuilder_.getMessage(index);
}
}
/**
*
* The multi virtual device settings. If empty (not set), it will create
* single virtual device on each visible GPU, according to the settings
* in "visible_device_list" above. Otherwise, the number of elements in the
* list must be the same as the number of visible GPUs (after
* "visible_device_list" filtering if it is set), and the string represented
* device names (e.g. /device:GPU:<id>) will refer to the virtual
* devices and have the <id> field assigned sequentially starting from 0,
* according to the order they appear in this list and the "memory_limit"
* list inside each element. For example,
* visible_device_list = "1,0"
* virtual_devices { memory_limit: 1GB memory_limit: 2GB }
* virtual_devices {}
* will create three virtual devices as:
* /device:GPU:0 -> visible GPU 1 with 1GB memory
* /device:GPU:1 -> visible GPU 1 with 2GB memory
* /device:GPU:2 -> visible GPU 0 with all available memory
* NOTE:
* 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
* at the same time.
* 2. Currently this setting is per-process, not per-session. Using
* different settings in different sessions within same process will
* result in undefined behavior.
*
*
* repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
*/
public Builder setVirtualDevices(
int index, org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices value) {
if (virtualDevicesBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureVirtualDevicesIsMutable();
virtualDevices_.set(index, value);
onChanged();
} else {
virtualDevicesBuilder_.setMessage(index, value);
}
return this;
}
/**
*
* The multi virtual device settings. If empty (not set), it will create
* single virtual device on each visible GPU, according to the settings
* in "visible_device_list" above. Otherwise, the number of elements in the
* list must be the same as the number of visible GPUs (after
* "visible_device_list" filtering if it is set), and the string represented
* device names (e.g. /device:GPU:<id>) will refer to the virtual
* devices and have the <id> field assigned sequentially starting from 0,
* according to the order they appear in this list and the "memory_limit"
* list inside each element. For example,
* visible_device_list = "1,0"
* virtual_devices { memory_limit: 1GB memory_limit: 2GB }
* virtual_devices {}
* will create three virtual devices as:
* /device:GPU:0 -> visible GPU 1 with 1GB memory
* /device:GPU:1 -> visible GPU 1 with 2GB memory
* /device:GPU:2 -> visible GPU 0 with all available memory
* NOTE:
* 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
* at the same time.
* 2. Currently this setting is per-process, not per-session. Using
* different settings in different sessions within same process will
* result in undefined behavior.
*
*
* repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
*/
public Builder setVirtualDevices(
int index, org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices.Builder builderForValue) {
if (virtualDevicesBuilder_ == null) {
ensureVirtualDevicesIsMutable();
virtualDevices_.set(index, builderForValue.build());
onChanged();
} else {
virtualDevicesBuilder_.setMessage(index, builderForValue.build());
}
return this;
}
/**
*
* The multi virtual device settings. If empty (not set), it will create
* single virtual device on each visible GPU, according to the settings
* in "visible_device_list" above. Otherwise, the number of elements in the
* list must be the same as the number of visible GPUs (after
* "visible_device_list" filtering if it is set), and the string represented
* device names (e.g. /device:GPU:<id>) will refer to the virtual
* devices and have the <id> field assigned sequentially starting from 0,
* according to the order they appear in this list and the "memory_limit"
* list inside each element. For example,
* visible_device_list = "1,0"
* virtual_devices { memory_limit: 1GB memory_limit: 2GB }
* virtual_devices {}
* will create three virtual devices as:
* /device:GPU:0 -> visible GPU 1 with 1GB memory
* /device:GPU:1 -> visible GPU 1 with 2GB memory
* /device:GPU:2 -> visible GPU 0 with all available memory
* NOTE:
* 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
* at the same time.
* 2. Currently this setting is per-process, not per-session. Using
* different settings in different sessions within same process will
* result in undefined behavior.
*
*
* repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
*/
public Builder addVirtualDevices(org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices value) {
if (virtualDevicesBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureVirtualDevicesIsMutable();
virtualDevices_.add(value);
onChanged();
} else {
virtualDevicesBuilder_.addMessage(value);
}
return this;
}
/**
*
* The multi virtual device settings. If empty (not set), it will create
* single virtual device on each visible GPU, according to the settings
* in "visible_device_list" above. Otherwise, the number of elements in the
* list must be the same as the number of visible GPUs (after
* "visible_device_list" filtering if it is set), and the string represented
* device names (e.g. /device:GPU:<id>) will refer to the virtual
* devices and have the <id> field assigned sequentially starting from 0,
* according to the order they appear in this list and the "memory_limit"
* list inside each element. For example,
* visible_device_list = "1,0"
* virtual_devices { memory_limit: 1GB memory_limit: 2GB }
* virtual_devices {}
* will create three virtual devices as:
* /device:GPU:0 -> visible GPU 1 with 1GB memory
* /device:GPU:1 -> visible GPU 1 with 2GB memory
* /device:GPU:2 -> visible GPU 0 with all available memory
* NOTE:
* 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
* at the same time.
* 2. Currently this setting is per-process, not per-session. Using
* different settings in different sessions within same process will
* result in undefined behavior.
*
*
* repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
*/
public Builder addVirtualDevices(
int index, org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices value) {
if (virtualDevicesBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureVirtualDevicesIsMutable();
virtualDevices_.add(index, value);
onChanged();
} else {
virtualDevicesBuilder_.addMessage(index, value);
}
return this;
}
/**
*
* The multi virtual device settings. If empty (not set), it will create
* single virtual device on each visible GPU, according to the settings
* in "visible_device_list" above. Otherwise, the number of elements in the
* list must be the same as the number of visible GPUs (after
* "visible_device_list" filtering if it is set), and the string represented
* device names (e.g. /device:GPU:<id>) will refer to the virtual
* devices and have the <id> field assigned sequentially starting from 0,
* according to the order they appear in this list and the "memory_limit"
* list inside each element. For example,
* visible_device_list = "1,0"
* virtual_devices { memory_limit: 1GB memory_limit: 2GB }
* virtual_devices {}
* will create three virtual devices as:
* /device:GPU:0 -> visible GPU 1 with 1GB memory
* /device:GPU:1 -> visible GPU 1 with 2GB memory
* /device:GPU:2 -> visible GPU 0 with all available memory
* NOTE:
* 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
* at the same time.
* 2. Currently this setting is per-process, not per-session. Using
* different settings in different sessions within same process will
* result in undefined behavior.
*
*
* repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
*/
public Builder addVirtualDevices(
org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices.Builder builderForValue) {
if (virtualDevicesBuilder_ == null) {
ensureVirtualDevicesIsMutable();
virtualDevices_.add(builderForValue.build());
onChanged();
} else {
virtualDevicesBuilder_.addMessage(builderForValue.build());
}
return this;
}
/**
*
* The multi virtual device settings. If empty (not set), it will create
* single virtual device on each visible GPU, according to the settings
* in "visible_device_list" above. Otherwise, the number of elements in the
* list must be the same as the number of visible GPUs (after
* "visible_device_list" filtering if it is set), and the string represented
* device names (e.g. /device:GPU:<id>) will refer to the virtual
* devices and have the <id> field assigned sequentially starting from 0,
* according to the order they appear in this list and the "memory_limit"
* list inside each element. For example,
* visible_device_list = "1,0"
* virtual_devices { memory_limit: 1GB memory_limit: 2GB }
* virtual_devices {}
* will create three virtual devices as:
* /device:GPU:0 -> visible GPU 1 with 1GB memory
* /device:GPU:1 -> visible GPU 1 with 2GB memory
* /device:GPU:2 -> visible GPU 0 with all available memory
* NOTE:
* 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
* at the same time.
* 2. Currently this setting is per-process, not per-session. Using
* different settings in different sessions within same process will
* result in undefined behavior.
*
*
* repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
*/
public Builder addVirtualDevices(
int index, org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices.Builder builderForValue) {
if (virtualDevicesBuilder_ == null) {
ensureVirtualDevicesIsMutable();
virtualDevices_.add(index, builderForValue.build());
onChanged();
} else {
virtualDevicesBuilder_.addMessage(index, builderForValue.build());
}
return this;
}
/**
*
* The multi virtual device settings. If empty (not set), it will create
* single virtual device on each visible GPU, according to the settings
* in "visible_device_list" above. Otherwise, the number of elements in the
* list must be the same as the number of visible GPUs (after
* "visible_device_list" filtering if it is set), and the string represented
* device names (e.g. /device:GPU:<id>) will refer to the virtual
* devices and have the <id> field assigned sequentially starting from 0,
* according to the order they appear in this list and the "memory_limit"
* list inside each element. For example,
* visible_device_list = "1,0"
* virtual_devices { memory_limit: 1GB memory_limit: 2GB }
* virtual_devices {}
* will create three virtual devices as:
* /device:GPU:0 -> visible GPU 1 with 1GB memory
* /device:GPU:1 -> visible GPU 1 with 2GB memory
* /device:GPU:2 -> visible GPU 0 with all available memory
* NOTE:
* 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
* at the same time.
* 2. Currently this setting is per-process, not per-session. Using
* different settings in different sessions within same process will
* result in undefined behavior.
*
*
* repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
*/
public Builder addAllVirtualDevices(
java.lang.Iterable<? extends org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices> values) {
if (virtualDevicesBuilder_ == null) {
ensureVirtualDevicesIsMutable();
com.google.protobuf.AbstractMessageLite.Builder.addAll(
values, virtualDevices_);
onChanged();
} else {
virtualDevicesBuilder_.addAllMessages(values);
}
return this;
}
/**
*
* The multi virtual device settings. If empty (not set), it will create
* single virtual device on each visible GPU, according to the settings
* in "visible_device_list" above. Otherwise, the number of elements in the
* list must be the same as the number of visible GPUs (after
* "visible_device_list" filtering if it is set), and the string represented
* device names (e.g. /device:GPU:<id>) will refer to the virtual
* devices and have the <id> field assigned sequentially starting from 0,
* according to the order they appear in this list and the "memory_limit"
* list inside each element. For example,
* visible_device_list = "1,0"
* virtual_devices { memory_limit: 1GB memory_limit: 2GB }
* virtual_devices {}
* will create three virtual devices as:
* /device:GPU:0 -> visible GPU 1 with 1GB memory
* /device:GPU:1 -> visible GPU 1 with 2GB memory
* /device:GPU:2 -> visible GPU 0 with all available memory
* NOTE:
* 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
* at the same time.
* 2. Currently this setting is per-process, not per-session. Using
* different settings in different sessions within same process will
* result in undefined behavior.
*
*
* repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
*/
public Builder clearVirtualDevices() {
if (virtualDevicesBuilder_ == null) {
virtualDevices_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000001);
onChanged();
} else {
virtualDevicesBuilder_.clear();
}
return this;
}
/**
*
* The multi virtual device settings. If empty (not set), it will create
* single virtual device on each visible GPU, according to the settings
* in "visible_device_list" above. Otherwise, the number of elements in the
* list must be the same as the number of visible GPUs (after
* "visible_device_list" filtering if it is set), and the string represented
* device names (e.g. /device:GPU:<id>) will refer to the virtual
* devices and have the <id> field assigned sequentially starting from 0,
* according to the order they appear in this list and the "memory_limit"
* list inside each element. For example,
* visible_device_list = "1,0"
* virtual_devices { memory_limit: 1GB memory_limit: 2GB }
* virtual_devices {}
* will create three virtual devices as:
* /device:GPU:0 -> visible GPU 1 with 1GB memory
* /device:GPU:1 -> visible GPU 1 with 2GB memory
* /device:GPU:2 -> visible GPU 0 with all available memory
* NOTE:
* 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
* at the same time.
* 2. Currently this setting is per-process, not per-session. Using
* different settings in different sessions within same process will
* result in undefined behavior.
*
*
* repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
*/
public Builder removeVirtualDevices(int index) {
if (virtualDevicesBuilder_ == null) {
ensureVirtualDevicesIsMutable();
virtualDevices_.remove(index);
onChanged();
} else {
virtualDevicesBuilder_.remove(index);
}
return this;
}
/**
*
* The multi virtual device settings. If empty (not set), it will create
* single virtual device on each visible GPU, according to the settings
* in "visible_device_list" above. Otherwise, the number of elements in the
* list must be the same as the number of visible GPUs (after
* "visible_device_list" filtering if it is set), and the string represented
* device names (e.g. /device:GPU:<id>) will refer to the virtual
* devices and have the <id> field assigned sequentially starting from 0,
* according to the order they appear in this list and the "memory_limit"
* list inside each element. For example,
* visible_device_list = "1,0"
* virtual_devices { memory_limit: 1GB memory_limit: 2GB }
* virtual_devices {}
* will create three virtual devices as:
* /device:GPU:0 -> visible GPU 1 with 1GB memory
* /device:GPU:1 -> visible GPU 1 with 2GB memory
* /device:GPU:2 -> visible GPU 0 with all available memory
* NOTE:
* 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
* at the same time.
* 2. Currently this setting is per-process, not per-session. Using
* different settings in different sessions within same process will
* result in undefined behavior.
*
*
* repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
*/
public org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices.Builder getVirtualDevicesBuilder(
int index) {
return getVirtualDevicesFieldBuilder().getBuilder(index);
}
/**
*
* The multi virtual device settings. If empty (not set), it will create
* single virtual device on each visible GPU, according to the settings
* in "visible_device_list" above. Otherwise, the number of elements in the
* list must be the same as the number of visible GPUs (after
* "visible_device_list" filtering if it is set), and the string represented
* device names (e.g. /device:GPU:<id>) will refer to the virtual
* devices and have the <id> field assigned sequentially starting from 0,
* according to the order they appear in this list and the "memory_limit"
* list inside each element. For example,
* visible_device_list = "1,0"
* virtual_devices { memory_limit: 1GB memory_limit: 2GB }
* virtual_devices {}
* will create three virtual devices as:
* /device:GPU:0 -> visible GPU 1 with 1GB memory
* /device:GPU:1 -> visible GPU 1 with 2GB memory
* /device:GPU:2 -> visible GPU 0 with all available memory
* NOTE:
* 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
* at the same time.
* 2. Currently this setting is per-process, not per-session. Using
* different settings in different sessions within same process will
* result in undefined behavior.
*
*
* repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
*/
public org.tensorflow.framework.GPUOptions.Experimental.VirtualDevicesOrBuilder getVirtualDevicesOrBuilder(
int index) {
if (virtualDevicesBuilder_ == null) {
return virtualDevices_.get(index); } else {
return virtualDevicesBuilder_.getMessageOrBuilder(index);
}
}
/**
*
* The multi virtual device settings. If empty (not set), it will create
* single virtual device on each visible GPU, according to the settings
* in "visible_device_list" above. Otherwise, the number of elements in the
* list must be the same as the number of visible GPUs (after
* "visible_device_list" filtering if it is set), and the string represented
* device names (e.g. /device:GPU:<id>) will refer to the virtual
* devices and have the <id> field assigned sequentially starting from 0,
* according to the order they appear in this list and the "memory_limit"
* list inside each element. For example,
* visible_device_list = "1,0"
* virtual_devices { memory_limit: 1GB memory_limit: 2GB }
* virtual_devices {}
* will create three virtual devices as:
* /device:GPU:0 -> visible GPU 1 with 1GB memory
* /device:GPU:1 -> visible GPU 1 with 2GB memory
* /device:GPU:2 -> visible GPU 0 with all available memory
* NOTE:
* 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
* at the same time.
* 2. Currently this setting is per-process, not per-session. Using
* different settings in different sessions within same process will
* result in undefined behavior.
*
*
* repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
*/
public java.util.List<? extends org.tensorflow.framework.GPUOptions.Experimental.VirtualDevicesOrBuilder>
getVirtualDevicesOrBuilderList() {
if (virtualDevicesBuilder_ != null) {
return virtualDevicesBuilder_.getMessageOrBuilderList();
} else {
return java.util.Collections.unmodifiableList(virtualDevices_);
}
}
/**
*
* The multi virtual device settings. If empty (not set), it will create
* single virtual device on each visible GPU, according to the settings
* in "visible_device_list" above. Otherwise, the number of elements in the
* list must be the same as the number of visible GPUs (after
* "visible_device_list" filtering if it is set), and the string represented
* device names (e.g. /device:GPU:<id>) will refer to the virtual
* devices and have the <id> field assigned sequentially starting from 0,
* according to the order they appear in this list and the "memory_limit"
* list inside each element. For example,
* visible_device_list = "1,0"
* virtual_devices { memory_limit: 1GB memory_limit: 2GB }
* virtual_devices {}
* will create three virtual devices as:
* /device:GPU:0 -> visible GPU 1 with 1GB memory
* /device:GPU:1 -> visible GPU 1 with 2GB memory
* /device:GPU:2 -> visible GPU 0 with all available memory
* NOTE:
* 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
* at the same time.
* 2. Currently this setting is per-process, not per-session. Using
* different settings in different sessions within same process will
* result in undefined behavior.
*
*
* repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
*/
public org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices.Builder addVirtualDevicesBuilder() {
return getVirtualDevicesFieldBuilder().addBuilder(
org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices.getDefaultInstance());
}
/**
*
* The multi virtual device settings. If empty (not set), it will create
* single virtual device on each visible GPU, according to the settings
* in "visible_device_list" above. Otherwise, the number of elements in the
* list must be the same as the number of visible GPUs (after
* "visible_device_list" filtering if it is set), and the string represented
* device names (e.g. /device:GPU:<id>) will refer to the virtual
* devices and have the <id> field assigned sequentially starting from 0,
* according to the order they appear in this list and the "memory_limit"
* list inside each element. For example,
* visible_device_list = "1,0"
* virtual_devices { memory_limit: 1GB memory_limit: 2GB }
* virtual_devices {}
* will create three virtual devices as:
* /device:GPU:0 -> visible GPU 1 with 1GB memory
* /device:GPU:1 -> visible GPU 1 with 2GB memory
* /device:GPU:2 -> visible GPU 0 with all available memory
* NOTE:
* 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
* at the same time.
* 2. Currently this setting is per-process, not per-session. Using
* different settings in different sessions within same process will
* result in undefined behavior.
*
*
* repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
*/
public org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices.Builder addVirtualDevicesBuilder(
int index) {
return getVirtualDevicesFieldBuilder().addBuilder(
index, org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices.getDefaultInstance());
}
/**
*
* The multi virtual device settings. If empty (not set), it will create
* single virtual device on each visible GPU, according to the settings
* in "visible_device_list" above. Otherwise, the number of elements in the
* list must be the same as the number of visible GPUs (after
* "visible_device_list" filtering if it is set), and the string represented
* device names (e.g. /device:GPU:<id>) will refer to the virtual
* devices and have the <id> field assigned sequentially starting from 0,
* according to the order they appear in this list and the "memory_limit"
* list inside each element. For example,
* visible_device_list = "1,0"
* virtual_devices { memory_limit: 1GB memory_limit: 2GB }
* virtual_devices {}
* will create three virtual devices as:
* /device:GPU:0 -> visible GPU 1 with 1GB memory
* /device:GPU:1 -> visible GPU 1 with 2GB memory
* /device:GPU:2 -> visible GPU 0 with all available memory
* NOTE:
* 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
* at the same time.
* 2. Currently this setting is per-process, not per-session. Using
* different settings in different sessions within same process will
* result in undefined behavior.
*
*
* repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
*/
public java.util.List<org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices.Builder>
getVirtualDevicesBuilderList() {
return getVirtualDevicesFieldBuilder().getBuilderList();
}
private com.google.protobuf.RepeatedFieldBuilderV3<
org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices, org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices.Builder, org.tensorflow.framework.GPUOptions.Experimental.VirtualDevicesOrBuilder>
getVirtualDevicesFieldBuilder() {
if (virtualDevicesBuilder_ == null) {
virtualDevicesBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3<
org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices, org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices.Builder, org.tensorflow.framework.GPUOptions.Experimental.VirtualDevicesOrBuilder>(
virtualDevices_,
((bitField0_ & 0x00000001) == 0x00000001),
getParentForChildren(),
isClean());
virtualDevices_ = null;
}
return virtualDevicesBuilder_;
}
private boolean useUnifiedMemory_ ;
/**
*
* If true, uses CUDA unified memory for memory allocations. If
* per_process_gpu_memory_fraction option is greater than 1.0, then unified
* memory is used regardless of the value for this field. See comments for
* per_process_gpu_memory_fraction field for more details and requirements
* of the unified memory. This option is useful to oversubscribe memory if
* multiple processes are sharing a single GPU while individually using less
* than 1.0 per process memory fraction.
*
*
* bool use_unified_memory = 2;
*/
public boolean getUseUnifiedMemory() {
return useUnifiedMemory_;
}
/**
*
* If true, uses CUDA unified memory for memory allocations. If
* per_process_gpu_memory_fraction option is greater than 1.0, then unified
* memory is used regardless of the value for this field. See comments for
* per_process_gpu_memory_fraction field for more details and requirements
* of the unified memory. This option is useful to oversubscribe memory if
* multiple processes are sharing a single GPU while individually using less
* than 1.0 per process memory fraction.
*
*
* bool use_unified_memory = 2;
*/
public Builder setUseUnifiedMemory(boolean value) {
useUnifiedMemory_ = value;
onChanged();
return this;
}
/**
*
* If true, uses CUDA unified memory for memory allocations. If
* per_process_gpu_memory_fraction option is greater than 1.0, then unified
* memory is used regardless of the value for this field. See comments for
* per_process_gpu_memory_fraction field for more details and requirements
* of the unified memory. This option is useful to oversubscribe memory if
* multiple processes are sharing a single GPU while individually using less
* than 1.0 per process memory fraction.
*
*
* bool use_unified_memory = 2;
*/
public Builder clearUseUnifiedMemory() {
useUnifiedMemory_ = false;
onChanged();
return this;
}
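// Illustrative sketch (not part of the generated class): enabling CUDA unified
// memory through the builder, which mainly matters when several processes share
// one GPU and each uses a per_process_gpu_memory_fraction below 1.0.
//
//   GPUOptions.Experimental exp = GPUOptions.Experimental.newBuilder()
//       .setUseUnifiedMemory(true)
//       .build();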
private int numDevToDevCopyStreams_ ;
/**
*
* If > 1, the number of device-to-device copy streams to create
* for each GPUDevice. Default value is 0, which is automatically
* converted to 1.
*
*
* int32 num_dev_to_dev_copy_streams = 3;
*/
public int getNumDevToDevCopyStreams() {
return numDevToDevCopyStreams_;
}
/**
*
* If > 1, the number of device-to-device copy streams to create
* for each GPUDevice. Default value is 0, which is automatically
* converted to 1.
*
*
* int32 num_dev_to_dev_copy_streams = 3;
*/
public Builder setNumDevToDevCopyStreams(int value) {
numDevToDevCopyStreams_ = value;
onChanged();
return this;
}
/**
*
* If > 1, the number of device-to-device copy streams to create
* for each GPUDevice. Default value is 0, which is automatically
* converted to 1.
*
*
* int32 num_dev_to_dev_copy_streams = 3;
*/
public Builder clearNumDevToDevCopyStreams() {
numDevToDevCopyStreams_ = 0;
onChanged();
return this;
}
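// Illustrative sketch (assumption): requesting two device-to-device copy streams
// per GPUDevice; leaving the field at its default of 0 is treated by the runtime
// as 1.
//
//   GPUOptions.Experimental exp = GPUOptions.Experimental.newBuilder()
//       .setNumDevToDevCopyStreams(2)
//       .build();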
@java.lang.Override
public final Builder setUnknownFields(
final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFieldsProto3(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:tensorflow.GPUOptions.Experimental)
}
// @@protoc_insertion_point(class_scope:tensorflow.GPUOptions.Experimental)
private static final org.tensorflow.framework.GPUOptions.Experimental DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.tensorflow.framework.GPUOptions.Experimental();
}
public static org.tensorflow.framework.GPUOptions.Experimental getDefaultInstance() {
return DEFAULT_INSTANCE;
}
private static final com.google.protobuf.Parser<Experimental>
PARSER = new com.google.protobuf.AbstractParser<Experimental>() {
@java.lang.Override
public Experimental parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new Experimental(input, extensionRegistry);
}
};
public static com.google.protobuf.Parser<Experimental> parser() {
return PARSER;
}
@java.lang.Override
public com.google.protobuf.Parser<Experimental> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.tensorflow.framework.GPUOptions.Experimental getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
public static final int PER_PROCESS_GPU_MEMORY_FRACTION_FIELD_NUMBER = 1;
private double perProcessGpuMemoryFraction_;
/**
*
* Fraction of the available GPU memory to allocate for each process.
* 1 means to allocate all of the GPU memory, 0.5 means the process
* allocates up to ~50% of the available GPU memory.
* GPU memory is pre-allocated unless the allow_growth option is enabled.
* If greater than 1.0, uses CUDA unified memory to potentially oversubscribe
* the amount of memory available on the GPU device by using host memory as a
* swap space. Accessing memory not available on the device will be
* significantly slower as that would require memory transfer between the host
* and the device. Options to reduce the memory requirement should be
* considered before enabling this option as this may come with a negative
* performance impact. Oversubscription using the unified memory requires
* Pascal class or newer GPUs and it is currently only supported on the Linux
* operating system. See
* https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#um-requirements
* for the detailed requirements.
*
*
* double per_process_gpu_memory_fraction = 1;
*/
public double getPerProcessGpuMemoryFraction() {
return perProcessGpuMemoryFraction_;
}
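// Illustrative sketch (not generated code): capping each process at roughly 40%
// of GPU memory via the builder. Values above 1.0 switch the allocator to CUDA
// unified memory, as the comment above explains.
//
//   GPUOptions opts = GPUOptions.newBuilder()
//       .setPerProcessGpuMemoryFraction(0.4)
//       .build();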
public static final int ALLOW_GROWTH_FIELD_NUMBER = 4;
private boolean allowGrowth_;
/**
*
* If true, the allocator does not pre-allocate the entire specified
* GPU memory region, instead starting small and growing as needed.
*
*
* bool allow_growth = 4;
*/
public boolean getAllowGrowth() {
return allowGrowth_;
}
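// Illustrative sketch (assumption): combining allow_growth with a memory
// fraction so the allocator starts small and only grows up to the configured cap
// instead of pre-allocating the whole region.
//
//   GPUOptions opts = GPUOptions.newBuilder()
//       .setAllowGrowth(true)
//       .setPerProcessGpuMemoryFraction(0.5)
//       .build();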
public static final int ALLOCATOR_TYPE_FIELD_NUMBER = 2;
private volatile java.lang.Object allocatorType_;
/**
*
* The type of GPU allocation strategy to use.
* Allowed values:
* "": The empty string (default) uses a system-chosen default
* which may change over time.
* "BFC": A "Best-fit with coalescing" algorithm, simplified from a
* version of dlmalloc.
*
*
* string allocator_type = 2;
*/
public java.lang.String getAllocatorType() {
java.lang.Object ref = allocatorType_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
allocatorType_ = s;
return s;
}
}
/**
*
* The type of GPU allocation strategy to use.
* Allowed values:
* "": The empty string (default) uses a system-chosen default
* which may change over time.
* "BFC": A "Best-fit with coalescing" algorithm, simplified from a
* version of dlmalloc.
*
*
* string allocator_type = 2;
*/
public com.google.protobuf.ByteString
getAllocatorTypeBytes() {
java.lang.Object ref = allocatorType_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
allocatorType_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
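// Illustrative sketch (assumption, using the standard generated setter):
// selecting the BFC allocator explicitly; the empty string leaves the choice to
// the system default, which may change over time.
//
//   GPUOptions opts = GPUOptions.newBuilder()
//       .setAllocatorType("BFC")
//       .build();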
public static final int DEFERRED_DELETION_BYTES_FIELD_NUMBER = 3;
private long deferredDeletionBytes_;
/**
*
* Delay deletion of up to this many bytes to reduce the number of
* interactions with gpu driver code. If 0, the system chooses
* a reasonable default (several MBs).
*
*
* int64 deferred_deletion_bytes = 3;
*/
public long getDeferredDeletionBytes() {
return deferredDeletionBytes_;
}
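// Illustrative sketch (assumption): raising the deferred-deletion threshold so
// freed GPU buffers are batched before the driver is called; 0 keeps the
// system-chosen default of several MBs.
//
//   GPUOptions opts = GPUOptions.newBuilder()
//       .setDeferredDeletionBytes(8L * 1024 * 1024)  // defer up to 8 MB
//       .build();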
public static final int VISIBLE_DEVICE_LIST_FIELD_NUMBER = 5;
private volatile java.lang.Object visibleDeviceList_;
/**
*
* A comma-separated list of GPU ids that determines the 'visible'
* to 'virtual' mapping of GPU devices. For example, if TensorFlow
* can see 8 GPU devices in the process, and one wanted to map
* visible GPU devices 5 and 3 as "/device:GPU:0", and "/device:GPU:1",
* then one would specify this field as "5,3". This field is similar in
* spirit to the CUDA_VISIBLE_DEVICES environment variable, except
* it applies to the visible GPU devices in the process.
* NOTE:
* 1. The GPU driver provides the process with the visible GPUs
* in an order which is not guaranteed to have any correlation to
* the *physical* GPU id in the machine. This field is used for
* remapping "visible" to "virtual", which means this operates only
* after the process starts. Users are required to use vendor
* specific mechanisms (e.g., CUDA_VISIBLE_DEVICES) to control the
* physical to visible device mapping prior to invoking TensorFlow.
* 2. In the code, the ids in this list are also called "platform GPU id"s,
* and the 'virtual' ids of GPU devices (i.e. the ids in the device
* name "/device:GPU:<id>") are also called "TF GPU id"s. Please
* refer to third_party/tensorflow/core/common_runtime/gpu/gpu_id.h
* for more information.
*
*
* string visible_device_list = 5;
*/
public java.lang.String getVisibleDeviceList() {
java.lang.Object ref = visibleDeviceList_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
visibleDeviceList_ = s;
return s;
}
}
/**
*
* A comma-separated list of GPU ids that determines the 'visible'
* to 'virtual' mapping of GPU devices. For example, if TensorFlow
* can see 8 GPU devices in the process, and one wanted to map
* visible GPU devices 5 and 3 as "/device:GPU:0", and "/device:GPU:1",
* then one would specify this field as "5,3". This field is similar in
* spirit to the CUDA_VISIBLE_DEVICES environment variable, except
* it applies to the visible GPU devices in the process.
* NOTE:
* 1. The GPU driver provides the process with the visible GPUs
* in an order which is not guaranteed to have any correlation to
* the *physical* GPU id in the machine. This field is used for
* remapping "visible" to "virtual", which means this operates only
* after the process starts. Users are required to use vendor
* specific mechanisms (e.g., CUDA_VISIBLE_DEVICES) to control the
* physical to visible device mapping prior to invoking TensorFlow.
* 2. In the code, the ids in this list are also called "platform GPU id"s,
* and the 'virtual' ids of GPU devices (i.e. the ids in the device
* name "/device:GPU:<id>") are also called "TF GPU id"s. Please
* refer to third_party/tensorflow/core/common_runtime/gpu/gpu_id.h
* for more information.
*
*
* string visible_device_list = 5;
*/
public com.google.protobuf.ByteString
getVisibleDeviceListBytes() {
java.lang.Object ref = visibleDeviceList_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
visibleDeviceList_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
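// Illustrative sketch (assumption): mapping visible GPUs 5 and 3 to
// /device:GPU:0 and /device:GPU:1 as the comment above describes. The
// physical-to-visible ordering still has to be controlled with vendor mechanisms
// such as CUDA_VISIBLE_DEVICES before the process starts.
//
//   GPUOptions opts = GPUOptions.newBuilder()
//       .setVisibleDeviceList("5,3")
//       .build();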
public static final int POLLING_ACTIVE_DELAY_USECS_FIELD_NUMBER = 6;
private int pollingActiveDelayUsecs_;
/**
*
* In the event polling loop sleep this many microseconds between
* PollEvents calls, when the queue is not empty. If value is not
* set or set to 0, gets set to a non-zero default.
*
*
* int32 polling_active_delay_usecs = 6;
*/
public int getPollingActiveDelayUsecs() {
return pollingActiveDelayUsecs_;
}
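// Illustrative sketch (assumption): shortening the sleep between PollEvents
// calls while the event queue is non-empty; leaving the field at 0 falls back to
// the non-zero default chosen by the runtime.
//
//   GPUOptions opts = GPUOptions.newBuilder()
//       .setPollingActiveDelayUsecs(10)
//       .build();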
public static final int POLLING_INACTIVE_DELAY_MSECS_FIELD_NUMBER = 7;
private int pollingInactiveDelayMsecs_;
/**
*
* This field is deprecated and ignored.
*
*
* int32 polling_inactive_delay_msecs = 7;
*/
public int getPollingInactiveDelayMsecs() {
return pollingInactiveDelayMsecs_;
}
public static final int FORCE_GPU_COMPATIBLE_FIELD_NUMBER = 8;
private boolean forceGpuCompatible_;
/**
*
* Force all tensors to be gpu_compatible. On a GPU-enabled TensorFlow,
* enabling this option forces all CPU tensors to be allocated with Cuda
* pinned memory. Normally, TensorFlow will infer which tensors should be
* allocated as the pinned memory. But in case where the inference is
* incomplete, this option can significantly speed up the cross-device memory
* copy performance as long as it fits the memory.
* Note that this option is not something that should be
* enabled by default for unknown or very large models, since all Cuda pinned
* memory is unpageable, having too much pinned memory might negatively impact
* the overall host system performance.
*
*
* bool force_gpu_compatible = 8;
*/
public boolean getForceGpuCompatible() {
return forceGpuCompatible_;
}
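// Illustrative sketch (assumption): forcing CUDA-pinned host allocations for
// faster host-device copies; as noted above, this is best left off for very
// large or unknown models, since pinned memory is unpageable.
//
//   GPUOptions opts = GPUOptions.newBuilder()
//       .setForceGpuCompatible(true)
//       .build();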
public static final int EXPERIMENTAL_FIELD_NUMBER = 9;
private org.tensorflow.framework.GPUOptions.Experimental experimental_;
/**
*
* Everything inside experimental is subject to change and is not subject
* to API stability guarantees in
* https://www.tensorflow.org/guide/version_compat.
*
*
* .tensorflow.GPUOptions.Experimental experimental = 9;
*/
public boolean hasExperimental() {
return experimental_ != null;
}
/**
*
* Everything inside experimental is subject to change and is not subject
* to API stability guarantees in
* https://www.tensorflow.org/guide/version_compat.
*
*
* .tensorflow.GPUOptions.Experimental experimental = 9;
*/
public org.tensorflow.framework.GPUOptions.Experimental getExperimental() {
return experimental_ == null ? org.tensorflow.framework.GPUOptions.Experimental.getDefaultInstance() : experimental_;
}
/**
*
* Everything inside experimental is subject to change and is not subject
* to API stability guarantees in
* https://www.tensorflow.org/guide/version_compat.
*
*
* .tensorflow.GPUOptions.Experimental experimental = 9;
*/
public org.tensorflow.framework.GPUOptions.ExperimentalOrBuilder getExperimentalOrBuilder() {
return getExperimental();
}
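// Illustrative sketch (not generated code): hasExperimental() distinguishes an
// unset sub-message from one that merely carries default values, while
// getExperimental() never returns null.
//
//   GPUOptions opts = GPUOptions.getDefaultInstance();
//   boolean set = opts.hasExperimental();                 // false
//   GPUOptions.Experimental exp = opts.getExperimental(); // default instance, not null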
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (perProcessGpuMemoryFraction_ != 0D) {
output.writeDouble(1, perProcessGpuMemoryFraction_);
}
if (!getAllocatorTypeBytes().isEmpty()) {
com.google.protobuf.GeneratedMessageV3.writeString(output, 2, allocatorType_);
}
if (deferredDeletionBytes_ != 0L) {
output.writeInt64(3, deferredDeletionBytes_);
}
if (allowGrowth_ != false) {
output.writeBool(4, allowGrowth_);
}
if (!getVisibleDeviceListBytes().isEmpty()) {
com.google.protobuf.GeneratedMessageV3.writeString(output, 5, visibleDeviceList_);
}
if (pollingActiveDelayUsecs_ != 0) {
output.writeInt32(6, pollingActiveDelayUsecs_);
}
if (pollingInactiveDelayMsecs_ != 0) {
output.writeInt32(7, pollingInactiveDelayMsecs_);
}
if (forceGpuCompatible_ != false) {
output.writeBool(8, forceGpuCompatible_);
}
if (experimental_ != null) {
output.writeMessage(9, getExperimental());
}
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (perProcessGpuMemoryFraction_ != 0D) {
size += com.google.protobuf.CodedOutputStream
.computeDoubleSize(1, perProcessGpuMemoryFraction_);
}
if (!getAllocatorTypeBytes().isEmpty()) {
size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, allocatorType_);
}
if (deferredDeletionBytes_ != 0L) {
size += com.google.protobuf.CodedOutputStream
.computeInt64Size(3, deferredDeletionBytes_);
}
if (allowGrowth_ != false) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(4, allowGrowth_);
}
if (!getVisibleDeviceListBytes().isEmpty()) {
size += com.google.protobuf.GeneratedMessageV3.computeStringSize(5, visibleDeviceList_);
}
if (pollingActiveDelayUsecs_ != 0) {
size += com.google.protobuf.CodedOutputStream
.computeInt32Size(6, pollingActiveDelayUsecs_);
}
if (pollingInactiveDelayMsecs_ != 0) {
size += com.google.protobuf.CodedOutputStream
.computeInt32Size(7, pollingInactiveDelayMsecs_);
}
if (forceGpuCompatible_ != false) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(8, forceGpuCompatible_);
}
if (experimental_ != null) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(9, getExperimental());
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.tensorflow.framework.GPUOptions)) {
return super.equals(obj);
}
org.tensorflow.framework.GPUOptions other = (org.tensorflow.framework.GPUOptions) obj;
boolean result = true;
result = result && (
java.lang.Double.doubleToLongBits(getPerProcessGpuMemoryFraction())
== java.lang.Double.doubleToLongBits(
other.getPerProcessGpuMemoryFraction()));
result = result && (getAllowGrowth()
== other.getAllowGrowth());
result = result && getAllocatorType()
.equals(other.getAllocatorType());
result = result && (getDeferredDeletionBytes()
== other.getDeferredDeletionBytes());
result = result && getVisibleDeviceList()
.equals(other.getVisibleDeviceList());
result = result && (getPollingActiveDelayUsecs()
== other.getPollingActiveDelayUsecs());
result = result && (getPollingInactiveDelayMsecs()
== other.getPollingInactiveDelayMsecs());
result = result && (getForceGpuCompatible()
== other.getForceGpuCompatible());
result = result && (hasExperimental() == other.hasExperimental());
if (hasExperimental()) {
result = result && getExperimental()
.equals(other.getExperimental());
}
result = result && unknownFields.equals(other.unknownFields);
return result;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
hash = (37 * hash) + PER_PROCESS_GPU_MEMORY_FRACTION_FIELD_NUMBER;
hash = (53 * hash) + com.google.protobuf.Internal.hashLong(
java.lang.Double.doubleToLongBits(getPerProcessGpuMemoryFraction()));
hash = (37 * hash) + ALLOW_GROWTH_FIELD_NUMBER;
hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(
getAllowGrowth());
hash = (37 * hash) + ALLOCATOR_TYPE_FIELD_NUMBER;
hash = (53 * hash) + getAllocatorType().hashCode();
hash = (37 * hash) + DEFERRED_DELETION_BYTES_FIELD_NUMBER;
hash = (53 * hash) + com.google.protobuf.Internal.hashLong(
getDeferredDeletionBytes());
hash = (37 * hash) + VISIBLE_DEVICE_LIST_FIELD_NUMBER;
hash = (53 * hash) + getVisibleDeviceList().hashCode();
hash = (37 * hash) + POLLING_ACTIVE_DELAY_USECS_FIELD_NUMBER;
hash = (53 * hash) + getPollingActiveDelayUsecs();
hash = (37 * hash) + POLLING_INACTIVE_DELAY_MSECS_FIELD_NUMBER;
hash = (53 * hash) + getPollingInactiveDelayMsecs();
hash = (37 * hash) + FORCE_GPU_COMPATIBLE_FIELD_NUMBER;
hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(
getForceGpuCompatible());
if (hasExperimental()) {
hash = (37 * hash) + EXPERIMENTAL_FIELD_NUMBER;
hash = (53 * hash) + getExperimental().hashCode();
}
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.tensorflow.framework.GPUOptions parseFrom(
java.nio.ByteBuffer data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.tensorflow.framework.GPUOptions parseFrom(
java.nio.ByteBuffer data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.tensorflow.framework.GPUOptions parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.tensorflow.framework.GPUOptions parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.tensorflow.framework.GPUOptions parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.tensorflow.framework.GPUOptions parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.tensorflow.framework.GPUOptions parseFrom(java.io.InputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.tensorflow.framework.GPUOptions parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.tensorflow.framework.GPUOptions parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.tensorflow.framework.GPUOptions parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.tensorflow.framework.GPUOptions parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.tensorflow.framework.GPUOptions parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
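// Illustrative sketch (assumption): round-tripping a GPUOptions message through
// the parseFrom overloads above; toByteArray() is inherited from the protobuf
// message base class.
//
//   byte[] wire = GPUOptions.newBuilder().setAllowGrowth(true).build().toByteArray();
//   GPUOptions restored = GPUOptions.parseFrom(wire);
//   assert restored.getAllowGrowth();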
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.tensorflow.framework.GPUOptions prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code tensorflow.GPUOptions}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:tensorflow.GPUOptions)
org.tensorflow.framework.GPUOptionsOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_GPUOptions_descriptor;
}
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_GPUOptions_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.tensorflow.framework.GPUOptions.class, org.tensorflow.framework.GPUOptions.Builder.class);
}
// Construct using org.tensorflow.framework.GPUOptions.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
}
}
@java.lang.Override
public Builder clear() {
super.clear();
perProcessGpuMemoryFraction_ = 0D;
allowGrowth_ = false;
allocatorType_ = "";
deferredDeletionBytes_ = 0L;
visibleDeviceList_ = "";
pollingActiveDelayUsecs_ = 0;
pollingInactiveDelayMsecs_ = 0;
forceGpuCompatible_ = false;
if (experimentalBuilder_ == null) {
experimental_ = null;
} else {
experimental_ = null;
experimentalBuilder_ = null;
}
return this;
}
@java.lang.Override
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_GPUOptions_descriptor;
}
@java.lang.Override
public org.tensorflow.framework.GPUOptions getDefaultInstanceForType() {
return org.tensorflow.framework.GPUOptions.getDefaultInstance();
}
@java.lang.Override
public org.tensorflow.framework.GPUOptions build() {
org.tensorflow.framework.GPUOptions result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.tensorflow.framework.GPUOptions buildPartial() {
org.tensorflow.framework.GPUOptions result = new org.tensorflow.framework.GPUOptions(this);
result.perProcessGpuMemoryFraction_ = perProcessGpuMemoryFraction_;
result.allowGrowth_ = allowGrowth_;
result.allocatorType_ = allocatorType_;
result.deferredDeletionBytes_ = deferredDeletionBytes_;
result.visibleDeviceList_ = visibleDeviceList_;
result.pollingActiveDelayUsecs_ = pollingActiveDelayUsecs_;
result.pollingInactiveDelayMsecs_ = pollingInactiveDelayMsecs_;
result.forceGpuCompatible_ = forceGpuCompatible_;
if (experimentalBuilder_ == null) {
result.experimental_ = experimental_;
} else {
result.experimental_ = experimentalBuilder_.build();
}
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return (Builder) super.clone();
}
@java.lang.Override
public Builder setField(
com.google.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return (Builder) super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
com.google.protobuf.Descriptors.FieldDescriptor field) {
return (Builder) super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
com.google.protobuf.Descriptors.OneofDescriptor oneof) {
return (Builder) super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
com.google.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return (Builder) super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
com.google.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return (Builder) super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.tensorflow.framework.GPUOptions) {
return mergeFrom((org.tensorflow.framework.GPUOptions)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.tensorflow.framework.GPUOptions other) {
if (other == org.tensorflow.framework.GPUOptions.getDefaultInstance()) return this;
if (other.getPerProcessGpuMemoryFraction() != 0D) {
setPerProcessGpuMemoryFraction(other.getPerProcessGpuMemoryFraction());
}
if (other.getAllowGrowth() != false) {
setAllowGrowth(other.getAllowGrowth());
}
if (!other.getAllocatorType().isEmpty()) {
allocatorType_ = other.allocatorType_;
onChanged();
}
if (other.getDeferredDeletionBytes() != 0L) {
setDeferredDeletionBytes(other.getDeferredDeletionBytes());
}
if (!other.getVisibleDeviceList().isEmpty()) {
visibleDeviceList_ = other.visibleDeviceList_;
onChanged();
}
if (other.getPollingActiveDelayUsecs() != 0) {
setPollingActiveDelayUsecs(other.getPollingActiveDelayUsecs());
}
if (other.getPollingInactiveDelayMsecs() != 0) {
setPollingInactiveDelayMsecs(other.getPollingInactiveDelayMsecs());
}
if (other.getForceGpuCompatible() != false) {
setForceGpuCompatible(other.getForceGpuCompatible());
}
if (other.hasExperimental()) {
mergeExperimental(other.getExperimental());
}
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
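// A small sketch of the merge semantics implemented above (illustrative only): scalar
// fields are copied only when the other message holds a non-default value, so merging
// does not clobber fields that were left unset in the source message.
//
//   GPUOptions base = GPUOptions.newBuilder().setAllowGrowth(true).build();
//   GPUOptions patch = GPUOptions.newBuilder().setVisibleDeviceList("5,3").build();
//   GPUOptions merged = base.toBuilder().mergeFrom(patch).build();
//   // merged.getAllowGrowth() == true and merged.getVisibleDeviceList().equals("5,3")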
@java.lang.Override
public final boolean isInitialized() {
return true;
}
@java.lang.Override
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.tensorflow.framework.GPUOptions parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.tensorflow.framework.GPUOptions) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private double perProcessGpuMemoryFraction_ ;
/**
*
* Fraction of the available GPU memory to allocate for each process.
* 1 means to allocate all of the GPU memory, 0.5 means the process
* allocates up to ~50% of the available GPU memory.
* GPU memory is pre-allocated unless the allow_growth option is enabled.
* If greater than 1.0, uses CUDA unified memory to potentially oversubscribe
* the amount of memory available on the GPU device by using host memory as a
* swap space. Accessing memory not available on the device will be
* significantly slower as that would require memory transfer between the host
* and the device. Options to reduce the memory requirement should be
* considered before enabling this option as this may come with a negative
* performance impact. Oversubscription using the unified memory requires
* Pascal class or newer GPUs and it is currently only supported on the Linux
* operating system. See
* https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#um-requirements
* for the detailed requirements.
*
*
* double per_process_gpu_memory_fraction = 1;
*/
public double getPerProcessGpuMemoryFraction() {
return perProcessGpuMemoryFraction_;
}
/**
*
* Fraction of the available GPU memory to allocate for each process.
* 1 means to allocate all of the GPU memory, 0.5 means the process
* allocates up to ~50% of the available GPU memory.
* GPU memory is pre-allocated unless the allow_growth option is enabled.
* If greater than 1.0, uses CUDA unified memory to potentially oversubscribe
* the amount of memory available on the GPU device by using host memory as a
* swap space. Accessing memory not available on the device will be
* significantly slower as that would require memory transfer between the host
* and the device. Options to reduce the memory requirement should be
* considered before enabling this option as this may come with a negative
* performance impact. Oversubscription using the unified memory requires
* Pascal class or newer GPUs and it is currently only supported on the Linux
* operating system. See
* https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#um-requirements
* for the detailed requirements.
*
*
* double per_process_gpu_memory_fraction = 1;
*/
public Builder setPerProcessGpuMemoryFraction(double value) {
perProcessGpuMemoryFraction_ = value;
onChanged();
return this;
}
/**
*
* Fraction of the available GPU memory to allocate for each process.
* 1 means to allocate all of the GPU memory, 0.5 means the process
* allocates up to ~50% of the available GPU memory.
* GPU memory is pre-allocated unless the allow_growth option is enabled.
* If greater than 1.0, uses CUDA unified memory to potentially oversubscribe
* the amount of memory available on the GPU device by using host memory as a
* swap space. Accessing memory not available on the device will be
* significantly slower as that would require memory transfer between the host
* and the device. Options to reduce the memory requirement should be
* considered before enabling this option as this may come with a negative
* performance impact. Oversubscription using the unified memory requires
* Pascal class or newer GPUs and it is currently only supported on the Linux
* operating system. See
* https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#um-requirements
* for the detailed requirements.
*
*
* double per_process_gpu_memory_fraction = 1;
*/
public Builder clearPerProcessGpuMemoryFraction() {
perProcessGpuMemoryFraction_ = 0D;
onChanged();
return this;
}
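// Usage sketch (illustrative only, not emitted by protoc): cap each process at roughly
// 40% of GPU memory. Per the field comment, values above 1.0 request CUDA unified-memory
// oversubscription (Pascal-or-newer GPUs, Linux only).
//
//   GPUOptions opts = GPUOptions.newBuilder()
//       .setPerProcessGpuMemoryFraction(0.4)
//       .build();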
private boolean allowGrowth_ ;
/**
*
* If true, the allocator does not pre-allocate the entire specified
* GPU memory region, instead starting small and growing as needed.
*
*
* bool allow_growth = 4;
*/
public boolean getAllowGrowth() {
return allowGrowth_;
}
/**
*
* If true, the allocator does not pre-allocate the entire specified
* GPU memory region, instead starting small and growing as needed.
*
*
* bool allow_growth = 4;
*/
public Builder setAllowGrowth(boolean value) {
allowGrowth_ = value;
onChanged();
return this;
}
/**
*
* If true, the allocator does not pre-allocate the entire specified
* GPU memory region, instead starting small and growing as needed.
*
*
* bool allow_growth = 4;
*/
public Builder clearAllowGrowth() {
allowGrowth_ = false;
onChanged();
return this;
}
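// Usage sketch (illustrative only, not emitted by protoc): let the allocator start small
// and grow on demand instead of pre-allocating the whole GPU memory region.
//
//   GPUOptions opts = GPUOptions.newBuilder()
//       .setAllowGrowth(true)
//       .build();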
private java.lang.Object allocatorType_ = "";
/**
*
* The type of GPU allocation strategy to use.
* Allowed values:
* "": The empty string (default) uses a system-chosen default
* which may change over time.
* "BFC": A "Best-fit with coalescing" algorithm, simplified from a
* version of dlmalloc.
*
*
* string allocator_type = 2;
*/
public java.lang.String getAllocatorType() {
java.lang.Object ref = allocatorType_;
if (!(ref instanceof java.lang.String)) {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
allocatorType_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
*
* The type of GPU allocation strategy to use.
* Allowed values:
* "": The empty string (default) uses a system-chosen default
* which may change over time.
* "BFC": A "Best-fit with coalescing" algorithm, simplified from a
* version of dlmalloc.
*
*
* string allocator_type = 2;
*/
public com.google.protobuf.ByteString
getAllocatorTypeBytes() {
java.lang.Object ref = allocatorType_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
allocatorType_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
*
* The type of GPU allocation strategy to use.
* Allowed values:
* "": The empty string (default) uses a system-chosen default
* which may change over time.
* "BFC": A "Best-fit with coalescing" algorithm, simplified from a
* version of dlmalloc.
*
*
* string allocator_type = 2;
*/
public Builder setAllocatorType(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
allocatorType_ = value;
onChanged();
return this;
}
/**
*
* The type of GPU allocation strategy to use.
* Allowed values:
* "": The empty string (default) uses a system-chosen default
* which may change over time.
* "BFC": A "Best-fit with coalescing" algorithm, simplified from a
* version of dlmalloc.
*
*
* string allocator_type = 2;
*/
public Builder clearAllocatorType() {
allocatorType_ = getDefaultInstance().getAllocatorType();
onChanged();
return this;
}
/**
*
* The type of GPU allocation strategy to use.
* Allowed values:
* "": The empty string (default) uses a system-chosen default
* which may change over time.
* "BFC": A "Best-fit with coalescing" algorithm, simplified from a
* version of dlmalloc.
*
*
* string allocator_type = 2;
*/
public Builder setAllocatorTypeBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
checkByteStringIsUtf8(value);
allocatorType_ = value;
onChanged();
return this;
}
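// Usage sketch (illustrative only, not emitted by protoc): request the "BFC" allocator
// explicitly; leaving the field as the empty string keeps the system-chosen default
// described in the field comment.
//
//   GPUOptions opts = GPUOptions.newBuilder()
//       .setAllocatorType("BFC")
//       .build();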
private long deferredDeletionBytes_ ;
/**
*
* Delay deletion of up to this many bytes to reduce the number of
* interactions with gpu driver code. If 0, the system chooses
* a reasonable default (several MBs).
*
*
* int64 deferred_deletion_bytes = 3;
*/
public long getDeferredDeletionBytes() {
return deferredDeletionBytes_;
}
/**
*
* Delay deletion of up to this many bytes to reduce the number of
* interactions with gpu driver code. If 0, the system chooses
* a reasonable default (several MBs).
*
*
* int64 deferred_deletion_bytes = 3;
*/
public Builder setDeferredDeletionBytes(long value) {
deferredDeletionBytes_ = value;
onChanged();
return this;
}
/**
*
* Delay deletion of up to this many bytes to reduce the number of
* interactions with gpu driver code. If 0, the system chooses
* a reasonable default (several MBs).
*
*
* int64 deferred_deletion_bytes = 3;
*/
public Builder clearDeferredDeletionBytes() {
deferredDeletionBytes_ = 0L;
onChanged();
return this;
}
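// Usage sketch (illustrative only, not emitted by protoc): defer freeing up to 8 MiB of
// GPU allocations to batch interactions with the driver; 0 keeps the system default.
//
//   GPUOptions opts = GPUOptions.newBuilder()
//       .setDeferredDeletionBytes(8L * 1024 * 1024)
//       .build();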
private java.lang.Object visibleDeviceList_ = "";
/**
*
* A comma-separated list of GPU ids that determines the 'visible'
* to 'virtual' mapping of GPU devices. For example, if TensorFlow
* can see 8 GPU devices in the process, and one wanted to map
* visible GPU devices 5 and 3 as "/device:GPU:0", and "/device:GPU:1",
* then one would specify this field as "5,3". This field is similar in
* spirit to the CUDA_VISIBLE_DEVICES environment variable, except
* it applies to the visible GPU devices in the process.
* NOTE:
* 1. The GPU driver provides the process with the visible GPUs
* in an order which is not guaranteed to have any correlation to
* the *physical* GPU id in the machine. This field is used for
* remapping "visible" to "virtual", which means this operates only
* after the process starts. Users are required to use vendor
* specific mechanisms (e.g., CUDA_VISIBLE_DEVICES) to control the
* physical to visible device mapping prior to invoking TensorFlow.
* 2. In the code, the ids in this list are also called "platform GPU id"s,
* and the 'virtual' ids of GPU devices (i.e. the ids in the device
* name "/device:GPU:<id>") are also called "TF GPU id"s. Please
* refer to third_party/tensorflow/core/common_runtime/gpu/gpu_id.h
* for more information.
*
*
* string visible_device_list = 5;
*/
public java.lang.String getVisibleDeviceList() {
java.lang.Object ref = visibleDeviceList_;
if (!(ref instanceof java.lang.String)) {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
visibleDeviceList_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
*
* A comma-separated list of GPU ids that determines the 'visible'
* to 'virtual' mapping of GPU devices. For example, if TensorFlow
* can see 8 GPU devices in the process, and one wanted to map
* visible GPU devices 5 and 3 as "/device:GPU:0", and "/device:GPU:1",
* then one would specify this field as "5,3". This field is similar in
* spirit to the CUDA_VISIBLE_DEVICES environment variable, except
* it applies to the visible GPU devices in the process.
* NOTE:
* 1. The GPU driver provides the process with the visible GPUs
* in an order which is not guaranteed to have any correlation to
* the *physical* GPU id in the machine. This field is used for
* remapping "visible" to "virtual", which means this operates only
* after the process starts. Users are required to use vendor
* specific mechanisms (e.g., CUDA_VISIBLE_DEVICES) to control the
* physical to visible device mapping prior to invoking TensorFlow.
* 2. In the code, the ids in this list are also called "platform GPU id"s,
* and the 'virtual' ids of GPU devices (i.e. the ids in the device
* name "/device:GPU:<id>") are also called "TF GPU id"s. Please
* refer to third_party/tensorflow/core/common_runtime/gpu/gpu_id.h
* for more information.
*
*
* string visible_device_list = 5;
*/
public com.google.protobuf.ByteString
getVisibleDeviceListBytes() {
java.lang.Object ref = visibleDeviceList_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
visibleDeviceList_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
*
* A comma-separated list of GPU ids that determines the 'visible'
* to 'virtual' mapping of GPU devices. For example, if TensorFlow
* can see 8 GPU devices in the process, and one wanted to map
* visible GPU devices 5 and 3 as "/device:GPU:0", and "/device:GPU:1",
* then one would specify this field as "5,3". This field is similar in
* spirit to the CUDA_VISIBLE_DEVICES environment variable, except
* it applies to the visible GPU devices in the process.
* NOTE:
* 1. The GPU driver provides the process with the visible GPUs
* in an order which is not guaranteed to have any correlation to
* the *physical* GPU id in the machine. This field is used for
* remapping "visible" to "virtual", which means this operates only
* after the process starts. Users are required to use vendor
* specific mechanisms (e.g., CUDA_VISIBLE_DEVICES) to control the
* physical to visible device mapping prior to invoking TensorFlow.
* 2. In the code, the ids in this list are also called "platform GPU id"s,
* and the 'virtual' ids of GPU devices (i.e. the ids in the device
* name "/device:GPU:<id>") are also called "TF GPU id"s. Please
* refer to third_party/tensorflow/core/common_runtime/gpu/gpu_id.h
* for more information.
*
*
* string visible_device_list = 5;
*/
public Builder setVisibleDeviceList(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
visibleDeviceList_ = value;
onChanged();
return this;
}
/**
*
* A comma-separated list of GPU ids that determines the 'visible'
* to 'virtual' mapping of GPU devices. For example, if TensorFlow
* can see 8 GPU devices in the process, and one wanted to map
* visible GPU devices 5 and 3 as "/device:GPU:0", and "/device:GPU:1",
* then one would specify this field as "5,3". This field is similar in
* spirit to the CUDA_VISIBLE_DEVICES environment variable, except
* it applies to the visible GPU devices in the process.
* NOTE:
* 1. The GPU driver provides the process with the visible GPUs
* in an order which is not guaranteed to have any correlation to
* the *physical* GPU id in the machine. This field is used for
* remapping "visible" to "virtual", which means this operates only
* after the process starts. Users are required to use vendor
* specific mechanisms (e.g., CUDA_VISIBLE_DEVICES) to control the
* physical to visible device mapping prior to invoking TensorFlow.
* 2. In the code, the ids in this list are also called "platform GPU id"s,
* and the 'virtual' ids of GPU devices (i.e. the ids in the device
* name "/device:GPU:<id>") are also called "TF GPU id"s. Please
* refer to third_party/tensorflow/core/common_runtime/gpu/gpu_id.h
* for more information.
*
*
* string visible_device_list = 5;
*/
public Builder clearVisibleDeviceList() {
visibleDeviceList_ = getDefaultInstance().getVisibleDeviceList();
onChanged();
return this;
}
/**
*
* A comma-separated list of GPU ids that determines the 'visible'
* to 'virtual' mapping of GPU devices. For example, if TensorFlow
* can see 8 GPU devices in the process, and one wanted to map
* visible GPU devices 5 and 3 as "/device:GPU:0", and "/device:GPU:1",
* then one would specify this field as "5,3". This field is similar in
* spirit to the CUDA_VISIBLE_DEVICES environment variable, except
* it applies to the visible GPU devices in the process.
* NOTE:
* 1. The GPU driver provides the process with the visible GPUs
* in an order which is not guaranteed to have any correlation to
* the *physical* GPU id in the machine. This field is used for
* remapping "visible" to "virtual", which means this operates only
* after the process starts. Users are required to use vendor
* specific mechanisms (e.g., CUDA_VISIBLE_DEVICES) to control the
* physical to visible device mapping prior to invoking TensorFlow.
* 2. In the code, the ids in this list are also called "platform GPU id"s,
* and the 'virtual' ids of GPU devices (i.e. the ids in the device
* name "/device:GPU:<id>") are also called "TF GPU id"s. Please
* refer to third_party/tensorflow/core/common_runtime/gpu/gpu_id.h
* for more information.
*
*
* string visible_device_list = 5;
*/
public Builder setVisibleDeviceListBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
checkByteStringIsUtf8(value);
visibleDeviceList_ = value;
onChanged();
return this;
}
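// Usage sketch (illustrative only, not emitted by protoc): expose visible GPUs 5 and 3
// as "/device:GPU:0" and "/device:GPU:1", mirroring the "5,3" example in the field
// comment above.
//
//   GPUOptions opts = GPUOptions.newBuilder()
//       .setVisibleDeviceList("5,3")
//       .build();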
private int pollingActiveDelayUsecs_ ;
/**
*
* In the event polling loop, sleep this many microseconds between
* PollEvents calls when the queue is not empty. If the value is not
* set or is set to 0, it gets set to a non-zero default.
*
*
* int32 polling_active_delay_usecs = 6;
*/
public int getPollingActiveDelayUsecs() {
return pollingActiveDelayUsecs_;
}
/**
*
* In the event polling loop, sleep this many microseconds between
* PollEvents calls when the queue is not empty. If the value is not
* set or is set to 0, it gets set to a non-zero default.
*
*
* int32 polling_active_delay_usecs = 6;
*/
public Builder setPollingActiveDelayUsecs(int value) {
pollingActiveDelayUsecs_ = value;
onChanged();
return this;
}
/**
*
* In the event polling loop, sleep this many microseconds between
* PollEvents calls when the queue is not empty. If the value is not
* set or is set to 0, it gets set to a non-zero default.
*
*
* int32 polling_active_delay_usecs = 6;
*/
public Builder clearPollingActiveDelayUsecs() {
pollingActiveDelayUsecs_ = 0;
onChanged();
return this;
}
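// Usage sketch (illustrative only, not emitted by protoc): poll every 100 microseconds
// while the event queue is non-empty; leaving the field at 0 selects the non-zero
// default described in the field comment.
//
//   GPUOptions opts = GPUOptions.newBuilder()
//       .setPollingActiveDelayUsecs(100)
//       .build();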
private int pollingInactiveDelayMsecs_ ;
/**
*
* This field is deprecated and ignored.
*
*
* int32 polling_inactive_delay_msecs = 7;
*/
public int getPollingInactiveDelayMsecs() {
return pollingInactiveDelayMsecs_;
}
/**
*
* This field is deprecated and ignored.
*
*
* int32 polling_inactive_delay_msecs = 7;
*/
public Builder setPollingInactiveDelayMsecs(int value) {
pollingInactiveDelayMsecs_ = value;
onChanged();
return this;
}
/**
*
* This field is deprecated and ignored.
*
*
* int32 polling_inactive_delay_msecs = 7;
*/
public Builder clearPollingInactiveDelayMsecs() {
pollingInactiveDelayMsecs_ = 0;
onChanged();
return this;
}
private boolean forceGpuCompatible_ ;
/**
*
* Force all tensors to be gpu_compatible. On a GPU-enabled TensorFlow,
* enabling this option forces all CPU tensors to be allocated with Cuda
* pinned memory. Normally, TensorFlow will infer which tensors should be
* allocated as pinned memory. But in cases where the inference is
* incomplete, this option can significantly speed up the cross-device memory
* copy performance, as long as the data fits in memory.
* Note that this option should not be enabled by default for unknown or
* very large models, since all Cuda pinned memory is unpageable; having too
* much pinned memory might negatively impact the overall host system
* performance.
*
*
* bool force_gpu_compatible = 8;
*/
public boolean getForceGpuCompatible() {
return forceGpuCompatible_;
}
/**
*
* Force all tensors to be gpu_compatible. On a GPU-enabled TensorFlow,
* enabling this option forces all CPU tensors to be allocated with Cuda
* pinned memory. Normally, TensorFlow will infer which tensors should be
* allocated as pinned memory. But in cases where the inference is
* incomplete, this option can significantly speed up the cross-device memory
* copy performance, as long as the data fits in memory.
* Note that this option should not be enabled by default for unknown or
* very large models, since all Cuda pinned memory is unpageable; having too
* much pinned memory might negatively impact the overall host system
* performance.
*
*
* bool force_gpu_compatible = 8;
*/
public Builder setForceGpuCompatible(boolean value) {
forceGpuCompatible_ = value;
onChanged();
return this;
}
/**
*
* Force all tensors to be gpu_compatible. On a GPU-enabled TensorFlow,
* enabling this option forces all CPU tensors to be allocated with Cuda
* pinned memory. Normally, TensorFlow will infer which tensors should be
* allocated as pinned memory. But in cases where the inference is
* incomplete, this option can significantly speed up the cross-device memory
* copy performance, as long as the data fits in memory.
* Note that this option should not be enabled by default for unknown or
* very large models, since all Cuda pinned memory is unpageable; having too
* much pinned memory might negatively impact the overall host system
* performance.
*
*
* bool force_gpu_compatible = 8;
*/
public Builder clearForceGpuCompatible() {
forceGpuCompatible_ = false;
onChanged();
return this;
}
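// Usage sketch (illustrative only, not emitted by protoc): force CUDA-pinned allocation
// of CPU tensors. As the field comment warns, this trades faster host/device copies for
// higher, unpageable host-memory pressure.
//
//   GPUOptions opts = GPUOptions.newBuilder()
//       .setForceGpuCompatible(true)
//       .build();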
private org.tensorflow.framework.GPUOptions.Experimental experimental_ = null;
private com.google.protobuf.SingleFieldBuilderV3<
org.tensorflow.framework.GPUOptions.Experimental, org.tensorflow.framework.GPUOptions.Experimental.Builder, org.tensorflow.framework.GPUOptions.ExperimentalOrBuilder> experimentalBuilder_;
/**
*
* Everything inside experimental is subject to change and is not subject
* to API stability guarantees in
* https://www.tensorflow.org/guide/version_compat.
*
*
* .tensorflow.GPUOptions.Experimental experimental = 9;
*/
public boolean hasExperimental() {
return experimentalBuilder_ != null || experimental_ != null;
}
/**
*
* Everything inside experimental is subject to change and is not subject
* to API stability guarantees in
* https://www.tensorflow.org/guide/version_compat.
*
*
* .tensorflow.GPUOptions.Experimental experimental = 9;
*/
public org.tensorflow.framework.GPUOptions.Experimental getExperimental() {
if (experimentalBuilder_ == null) {
return experimental_ == null ? org.tensorflow.framework.GPUOptions.Experimental.getDefaultInstance() : experimental_;
} else {
return experimentalBuilder_.getMessage();
}
}
/**
*
* Everything inside experimental is subject to change and is not subject
* to API stability guarantees in
* https://www.tensorflow.org/guide/version_compat.
*
*
* .tensorflow.GPUOptions.Experimental experimental = 9;
*/
public Builder setExperimental(org.tensorflow.framework.GPUOptions.Experimental value) {
if (experimentalBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
experimental_ = value;
onChanged();
} else {
experimentalBuilder_.setMessage(value);
}
return this;
}
/**
*
* Everything inside experimental is subject to change and is not subject
* to API stability guarantees in
* https://www.tensorflow.org/guide/version_compat.
*
*
* .tensorflow.GPUOptions.Experimental experimental = 9;
*/
public Builder setExperimental(
org.tensorflow.framework.GPUOptions.Experimental.Builder builderForValue) {
if (experimentalBuilder_ == null) {
experimental_ = builderForValue.build();
onChanged();
} else {
experimentalBuilder_.setMessage(builderForValue.build());
}
return this;
}
/**
*
* Everything inside experimental is subject to change and is not subject
* to API stability guarantees in
* https://www.tensorflow.org/guide/version_compat.
*
*
* .tensorflow.GPUOptions.Experimental experimental = 9;
*/
public Builder mergeExperimental(org.tensorflow.framework.GPUOptions.Experimental value) {
if (experimentalBuilder_ == null) {
if (experimental_ != null) {
experimental_ =
org.tensorflow.framework.GPUOptions.Experimental.newBuilder(experimental_).mergeFrom(value).buildPartial();
} else {
experimental_ = value;
}
onChanged();
} else {
experimentalBuilder_.mergeFrom(value);
}
return this;
}
/**
*
* Everything inside experimental is subject to change and is not subject
* to API stability guarantees in
* https://www.tensorflow.org/guide/version_compat.
*
*
* .tensorflow.GPUOptions.Experimental experimental = 9;
*/
public Builder clearExperimental() {
if (experimentalBuilder_ == null) {
experimental_ = null;
onChanged();
} else {
experimental_ = null;
experimentalBuilder_ = null;
}
return this;
}
/**
*
* Everything inside experimental is subject to change and is not subject
* to API stability guarantees in
* https://www.tensorflow.org/guide/version_compat.
*
*
* .tensorflow.GPUOptions.Experimental experimental = 9;
*/
public org.tensorflow.framework.GPUOptions.Experimental.Builder getExperimentalBuilder() {
onChanged();
return getExperimentalFieldBuilder().getBuilder();
}
/**
*
* Everything inside experimental is subject to change and is not subject
* to API stability guarantees in
* https://www.tensorflow.org/guide/version_compat.
*
*
* .tensorflow.GPUOptions.Experimental experimental = 9;
*/
public org.tensorflow.framework.GPUOptions.ExperimentalOrBuilder getExperimentalOrBuilder() {
if (experimentalBuilder_ != null) {
return experimentalBuilder_.getMessageOrBuilder();
} else {
return experimental_ == null ?
org.tensorflow.framework.GPUOptions.Experimental.getDefaultInstance() : experimental_;
}
}
/**
*
* Everything inside experimental is subject to change and is not subject
* to API stability guarantees in
* https://www.tensorflow.org/guide/version_compat.
*
*
* .tensorflow.GPUOptions.Experimental experimental = 9;
*/
private com.google.protobuf.SingleFieldBuilderV3<
org.tensorflow.framework.GPUOptions.Experimental, org.tensorflow.framework.GPUOptions.Experimental.Builder, org.tensorflow.framework.GPUOptions.ExperimentalOrBuilder>
getExperimentalFieldBuilder() {
if (experimentalBuilder_ == null) {
experimentalBuilder_ = new com.google.protobuf.SingleFieldBuilderV3<
org.tensorflow.framework.GPUOptions.Experimental, org.tensorflow.framework.GPUOptions.Experimental.Builder, org.tensorflow.framework.GPUOptions.ExperimentalOrBuilder>(
getExperimental(),
getParentForChildren(),
isClean());
experimental_ = null;
}
return experimentalBuilder_;
}
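// Usage sketch (illustrative only, not emitted by protoc): attach an Experimental
// sub-message via its own builder. The concrete Experimental fields vary between
// TensorFlow releases, so only the default instance is shown here.
//
//   GPUOptions opts = GPUOptions.newBuilder()
//       .setExperimental(GPUOptions.Experimental.newBuilder().build())
//       .build();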
@java.lang.Override
public final Builder setUnknownFields(
final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFieldsProto3(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:tensorflow.GPUOptions)
}
// @@protoc_insertion_point(class_scope:tensorflow.GPUOptions)
private static final org.tensorflow.framework.GPUOptions DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.tensorflow.framework.GPUOptions();
}
public static org.tensorflow.framework.GPUOptions getDefaultInstance() {
return DEFAULT_INSTANCE;
}
private static final com.google.protobuf.Parser<GPUOptions>
PARSER = new com.google.protobuf.AbstractParser<GPUOptions>() {
@java.lang.Override
public GPUOptions parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new GPUOptions(input, extensionRegistry);
}
};
public static com.google.protobuf.Parser<GPUOptions> parser() {
return PARSER;
}
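// Usage sketch (illustrative only, not emitted by protoc): round-trip a GPUOptions
// message through its wire format using the parser exposed above.
//
//   byte[] wire = GPUOptions.newBuilder().setAllowGrowth(true).build().toByteArray();
//   GPUOptions decoded = GPUOptions.parser().parseFrom(wire);
//   // parseFrom declares com.google.protobuf.InvalidProtocolBufferException.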
@java.lang.Override
public com.google.protobuf.Parser<GPUOptions> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.tensorflow.framework.GPUOptions getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}