// [review] Removed two lines of non-Java promotional text ("Project price only 1 $")
// that had been prepended to this generated file and broke compilation.
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: tensorflow/core/protobuf/config.proto
package org.tensorflow.framework;
/**
* Protobuf type {@code tensorflow.GPUOptions}
*/
public final class GPUOptions extends
org.nd4j.shade.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:tensorflow.GPUOptions)
GPUOptionsOrBuilder {
private static final long serialVersionUID = 0L;
// Use GPUOptions.newBuilder() to construct.
private GPUOptions(org.nd4j.shade.protobuf.GeneratedMessageV3.Builder builder) {
super(builder);
}
// No-arg constructor: proto3 string fields default to the empty string.
private GPUOptions() {
allocatorType_ = "";
visibleDeviceList_ = "";
}
// Used reflectively by the protobuf runtime to create instances without parsing.
@java.lang.Override
@SuppressWarnings({"unused"})
protected java.lang.Object newInstance(
UnusedPrivateParameter unused) {
return new GPUOptions();
}
// Fields that were present on the wire but unknown to this schema version.
@java.lang.Override
public final org.nd4j.shade.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
// Parsing constructor: decodes a serialized GPUOptions message from the wire.
// Each case label is the raw tag value: (field_number << 3) | wire_type.
private GPUOptions(
org.nd4j.shade.protobuf.CodedInputStream input,
org.nd4j.shade.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.nd4j.shade.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
org.nd4j.shade.protobuf.UnknownFieldSet.Builder unknownFields =
org.nd4j.shade.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
// Tag 0 means end of input.
done = true;
break;
case 9: {
// double per_process_gpu_memory_fraction = 1
perProcessGpuMemoryFraction_ = input.readDouble();
break;
}
case 18: {
// string allocator_type = 2
java.lang.String s = input.readStringRequireUtf8();
allocatorType_ = s;
break;
}
case 24: {
// int64 deferred_deletion_bytes = 3
deferredDeletionBytes_ = input.readInt64();
break;
}
case 32: {
// bool allow_growth = 4
allowGrowth_ = input.readBool();
break;
}
case 42: {
// string visible_device_list = 5
java.lang.String s = input.readStringRequireUtf8();
visibleDeviceList_ = s;
break;
}
case 48: {
// int32 polling_active_delay_usecs = 6
pollingActiveDelayUsecs_ = input.readInt32();
break;
}
case 56: {
// int32 polling_inactive_delay_msecs = 7
pollingInactiveDelayMsecs_ = input.readInt32();
break;
}
case 64: {
// bool force_gpu_compatible = 8
forceGpuCompatible_ = input.readBool();
break;
}
case 74: {
// GPUOptions.Experimental experimental = 9
// If the field appeared before, merge the new submessage into the old one.
org.tensorflow.framework.GPUOptions.Experimental.Builder subBuilder = null;
if (experimental_ != null) {
subBuilder = experimental_.toBuilder();
}
experimental_ = input.readMessage(org.tensorflow.framework.GPUOptions.Experimental.parser(), extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(experimental_);
experimental_ = subBuilder.buildPartial();
}
break;
}
default: {
// Preserve unrecognized fields instead of dropping them.
if (!parseUnknownField(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (org.nd4j.shade.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.nd4j.shade.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
// Attach unknown fields even if parsing failed midway.
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
// Descriptor and reflective field access for GPUOptions (shared via ConfigProtos).
public static final org.nd4j.shade.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_GPUOptions_descriptor;
}
@java.lang.Override
protected org.nd4j.shade.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_GPUOptions_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.tensorflow.framework.GPUOptions.class, org.tensorflow.framework.GPUOptions.Builder.class);
}
public interface ExperimentalOrBuilder extends
// @@protoc_insertion_point(interface_extends:tensorflow.GPUOptions.Experimental)
org.nd4j.shade.protobuf.MessageOrBuilder {
// NOTE(review): the original text repeated the "virtual_devices" field comment
// five times here with no accompanying method declarations. In protoc-generated
// code those five javadocs belong to the virtual_devices accessors
// (getVirtualDevicesList(), getVirtualDevices(int), getVirtualDevicesCount(),
// getVirtualDevicesOrBuilderList(), getVirtualDevicesOrBuilder(int)), whose
// declarations appear to have been lost in extraction -- TODO confirm against
// the original protoc output and restore them.
//
// virtual_devices field semantics (kept once for reference):
// The multi virtual device settings. If empty (not set), it will create
// single virtual device on each visible GPU, according to the settings
// in "visible_device_list" above. Otherwise, the number of elements in the
// list must be the same as the number of visible GPUs (after
// "visible_device_list" filtering if it is set), and the string represented
// device names (e.g. /device:GPU:<id>) will refer to the virtual
// devices and have the <id> field assigned sequentially starting from 0,
// according to the order they appear in this list and the "memory_limit"
// list inside each element. For example,
//   visible_device_list = "1,0"
//   virtual_devices { memory_limit: 1GB memory_limit: 2GB }
//   virtual_devices {}
// will create three virtual devices as:
//   /device:GPU:0 -> visible GPU 1 with 1GB memory
//   /device:GPU:1 -> visible GPU 1 with 2GB memory
//   /device:GPU:2 -> visible GPU 0 with all available memory
// NOTE:
//   1. It's invalid to set both this and "per_process_gpu_memory_fraction"
//      at the same time.
//   2. Currently this setting is per-process, not per-session. Using
//      different settings in different sessions within same process will
//      result in undefined behavior.
/**
 * <pre>
 * If true, uses CUDA unified memory for memory allocations. If
 * per_process_gpu_memory_fraction option is greater than 1.0, then unified
 * memory is used regardless of the value for this field. See comments for
 * per_process_gpu_memory_fraction field for more details and requirements
 * of the unified memory. This option is useful to oversubscribe memory if
 * multiple processes are sharing a single GPU while individually using less
 * than 1.0 per process memory fraction.
 * </pre>
 *
 * <code>bool use_unified_memory = 2;</code>
 * @return The useUnifiedMemory.
 */
boolean getUseUnifiedMemory();
}
/**
* Protobuf type {@code tensorflow.GPUOptions.Experimental}
*/
public static final class Experimental extends
org.nd4j.shade.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:tensorflow.GPUOptions.Experimental)
ExperimentalOrBuilder {
private static final long serialVersionUID = 0L;
// Use Experimental.newBuilder() to construct.
private Experimental(org.nd4j.shade.protobuf.GeneratedMessageV3.Builder builder) {
super(builder);
}
// No-arg constructor: the repeated field starts as an immutable empty list.
private Experimental() {
virtualDevices_ = java.util.Collections.emptyList();
}
// Used reflectively by the protobuf runtime to create instances without parsing.
@java.lang.Override
@SuppressWarnings({"unused"})
protected java.lang.Object newInstance(
UnusedPrivateParameter unused) {
return new Experimental();
}
// Fields that were present on the wire but unknown to this schema version.
@java.lang.Override
public final org.nd4j.shade.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
/**
 * Parsing constructor: decodes a serialized {@code GPUOptions.Experimental}
 * message. Unknown fields are preserved, and the repeated virtual_devices
 * list is made unmodifiable once parsing finishes (even on failure).
 *
 * @throws org.nd4j.shade.protobuf.InvalidProtocolBufferException if the
 *         stream is malformed; the partially parsed message is attached.
 */
private Experimental(
org.nd4j.shade.protobuf.CodedInputStream input,
org.nd4j.shade.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.nd4j.shade.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
int mutable_bitField0_ = 0;
org.nd4j.shade.protobuf.UnknownFieldSet.Builder unknownFields =
org.nd4j.shade.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
// End of stream.
done = true;
break;
case 10: {
// repeated VirtualDevices virtual_devices = 1.
// Lazily switch to a mutable list on the first element.
if (!((mutable_bitField0_ & 0x00000001) != 0)) {
// Fixed raw-type allocation (was "new java.util.ArrayList()").
virtualDevices_ = new java.util.ArrayList<org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices>();
mutable_bitField0_ |= 0x00000001;
}
virtualDevices_.add(
input.readMessage(org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices.parser(), extensionRegistry));
break;
}
case 16: {
// bool use_unified_memory = 2.
useUnifiedMemory_ = input.readBool();
break;
}
default: {
// Preserve unrecognized fields instead of dropping them.
if (!parseUnknownField(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (org.nd4j.shade.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.nd4j.shade.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
// Seal the list and attach unknown fields even if parsing failed midway.
if (((mutable_bitField0_ & 0x00000001) != 0)) {
virtualDevices_ = java.util.Collections.unmodifiableList(virtualDevices_);
}
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
// Descriptor and reflective field access for GPUOptions.Experimental.
public static final org.nd4j.shade.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_GPUOptions_Experimental_descriptor;
}
@java.lang.Override
protected org.nd4j.shade.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_GPUOptions_Experimental_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.tensorflow.framework.GPUOptions.Experimental.class, org.tensorflow.framework.GPUOptions.Experimental.Builder.class);
}
public interface VirtualDevicesOrBuilder extends
// @@protoc_insertion_point(interface_extends:tensorflow.GPUOptions.Experimental.VirtualDevices)
org.nd4j.shade.protobuf.MessageOrBuilder {
/**
 * <pre>
 * Per "virtual" device memory limit, in MB. The number of elements in
 * the list is the number of virtual devices to create on the
 * corresponding visible GPU (see "virtual_devices" below).
 * If empty, it will create single virtual device taking all available
 * memory from the device.
 * For the concept of "visible" and "virtual" GPU, see the comments for
 * "visible_device_list" above for more information.
 * </pre>
 *
 * <code>repeated float memory_limit_mb = 1;</code>
 * @return A list containing the memoryLimitMb.
 */
java.util.List<java.lang.Float> getMemoryLimitMbList();
/**
 * <code>repeated float memory_limit_mb = 1;</code>
 * @return The count of memoryLimitMb.
 */
int getMemoryLimitMbCount();
/**
 * <code>repeated float memory_limit_mb = 1;</code>
 * @param index The index of the element to return.
 * @return The memoryLimitMb at the given index.
 */
float getMemoryLimitMb(int index);
}
/**
*
* Configuration for breaking down a visible GPU into multiple "virtual"
* devices.
*
*
* Protobuf type {@code tensorflow.GPUOptions.Experimental.VirtualDevices}
*/
public static final class VirtualDevices extends
org.nd4j.shade.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:tensorflow.GPUOptions.Experimental.VirtualDevices)
VirtualDevicesOrBuilder {
private static final long serialVersionUID = 0L;
// Use VirtualDevices.newBuilder() to construct.
private VirtualDevices(org.nd4j.shade.protobuf.GeneratedMessageV3.Builder builder) {
super(builder);
}
// No-arg constructor: the repeated float field starts as the shared empty list.
private VirtualDevices() {
memoryLimitMb_ = emptyFloatList();
}
// Used reflectively by the protobuf runtime to create instances without parsing.
@java.lang.Override
@SuppressWarnings({"unused"})
protected java.lang.Object newInstance(
UnusedPrivateParameter unused) {
return new VirtualDevices();
}
// Fields that were present on the wire but unknown to this schema version.
@java.lang.Override
public final org.nd4j.shade.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
// Parsing constructor: decodes a serialized VirtualDevices message.
// memory_limit_mb is a repeated float (field 1), so it can arrive either
// packed (tag 10: one length-delimited run of floats) or unpacked
// (tag 13: one fixed32 float per tag).
private VirtualDevices(
org.nd4j.shade.protobuf.CodedInputStream input,
org.nd4j.shade.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.nd4j.shade.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
int mutable_bitField0_ = 0;
org.nd4j.shade.protobuf.UnknownFieldSet.Builder unknownFields =
org.nd4j.shade.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
// End of stream.
done = true;
break;
case 13: {
// Unpacked encoding: a single float value.
if (!((mutable_bitField0_ & 0x00000001) != 0)) {
memoryLimitMb_ = newFloatList();
mutable_bitField0_ |= 0x00000001;
}
memoryLimitMb_.addFloat(input.readFloat());
break;
}
case 10: {
// Packed encoding: a length-prefixed block of floats.
int length = input.readRawVarint32();
int limit = input.pushLimit(length);
if (!((mutable_bitField0_ & 0x00000001) != 0) && input.getBytesUntilLimit() > 0) {
memoryLimitMb_ = newFloatList();
mutable_bitField0_ |= 0x00000001;
}
while (input.getBytesUntilLimit() > 0) {
memoryLimitMb_.addFloat(input.readFloat());
}
input.popLimit(limit);
break;
}
default: {
// Preserve unrecognized fields instead of dropping them.
if (!parseUnknownField(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (org.nd4j.shade.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new org.nd4j.shade.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
// Seal the list and attach unknown fields even if parsing failed midway.
if (((mutable_bitField0_ & 0x00000001) != 0)) {
memoryLimitMb_.makeImmutable();
}
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
// Descriptor and reflective field access for GPUOptions.Experimental.VirtualDevices.
public static final org.nd4j.shade.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_GPUOptions_Experimental_VirtualDevices_descriptor;
}
@java.lang.Override
protected org.nd4j.shade.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_GPUOptions_Experimental_VirtualDevices_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices.class, org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices.Builder.class);
}
public static final int MEMORY_LIMIT_MB_FIELD_NUMBER = 1;
// Backing store for memory_limit_mb; sealed (immutable) after parsing/building.
private org.nd4j.shade.protobuf.Internal.FloatList memoryLimitMb_;
/**
 * <pre>
 * Per "virtual" device memory limit, in MB. The number of elements in
 * the list is the number of virtual devices to create on the
 * corresponding visible GPU (see "virtual_devices" below).
 * If empty, it will create single virtual device taking all available
 * memory from the device.
 * For the concept of "visible" and "virtual" GPU, see the comments for
 * "visible_device_list" above for more information.
 * </pre>
 *
 * <code>repeated float memory_limit_mb = 1;</code>
 * @return A list containing the memoryLimitMb.
 */
@java.lang.Override
public java.util.List<java.lang.Float>
getMemoryLimitMbList() {
// Fixed raw return type (was "java.util.List"); FloatList implements List<Float>.
return memoryLimitMb_;
}
/**
 * <code>repeated float memory_limit_mb = 1;</code>
 * @return The count of memoryLimitMb.
 */
public int getMemoryLimitMbCount() {
return memoryLimitMb_.size();
}
/**
 * <code>repeated float memory_limit_mb = 1;</code>
 * @param index The index of the element to return.
 * @return The memoryLimitMb at the given index.
 */
public float getMemoryLimitMb(int index) {
return memoryLimitMb_.getFloat(index);
}
// Memoized packed-payload byte size for memory_limit_mb (set by getSerializedSize()).
private int memoryLimitMbMemoizedSerializedSize = -1;
// Memoized isInitialized result: -1 unknown, 0 false, 1 true.
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
// No required fields, so the message is always initialized.
memoizedIsInitialized = 1;
return true;
}
// Serializes the message. Floats are written packed: tag byte 10, then the
// payload byte length (memoized by getSerializedSize()), then the raw floats.
@java.lang.Override
public void writeTo(org.nd4j.shade.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (getMemoryLimitMbList().size() > 0) {
output.writeUInt32NoTag(10);
output.writeUInt32NoTag(memoryLimitMbMemoizedSerializedSize);
}
for (int i = 0; i < memoryLimitMb_.size(); i++) {
output.writeFloatNoTag(memoryLimitMb_.getFloat(i));
}
unknownFields.writeTo(output);
}
// Computes (and memoizes) the serialized byte size, including the packed
// header for memory_limit_mb when the list is non-empty.
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
{
int dataSize = 0;
dataSize = 4 * getMemoryLimitMbList().size();
size += dataSize;
if (!getMemoryLimitMbList().isEmpty()) {
// One byte for the tag plus the varint-encoded payload length.
size += 1;
size += org.nd4j.shade.protobuf.CodedOutputStream
.computeInt32SizeNoTag(dataSize);
}
memoryLimitMbMemoizedSerializedSize = dataSize;
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
// Value equality: same memory_limit_mb contents and same unknown fields.
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices)) {
return super.equals(obj);
}
org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices other = (org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices) obj;
if (!getMemoryLimitMbList()
.equals(other.getMemoryLimitMbList())) return false;
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
// Memoized hash code, consistent with equals() above.
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (getMemoryLimitMbCount() > 0) {
hash = (37 * hash) + MEMORY_LIMIT_MB_FIELD_NUMBER;
hash = (53 * hash) + getMemoryLimitMbList().hashCode();
}
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
// Static parse entry points. The byte-array/ByteString/ByteBuffer overloads
// delegate to PARSER directly; the stream overloads go through
// GeneratedMessageV3 helpers that convert IOExceptions appropriately.
public static org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices parseFrom(
java.nio.ByteBuffer data)
throws org.nd4j.shade.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices parseFrom(
java.nio.ByteBuffer data,
org.nd4j.shade.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.nd4j.shade.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices parseFrom(
org.nd4j.shade.protobuf.ByteString data)
throws org.nd4j.shade.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices parseFrom(
org.nd4j.shade.protobuf.ByteString data,
org.nd4j.shade.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.nd4j.shade.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices parseFrom(byte[] data)
throws org.nd4j.shade.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices parseFrom(
byte[] data,
org.nd4j.shade.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.nd4j.shade.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices parseFrom(java.io.InputStream input)
throws java.io.IOException {
return org.nd4j.shade.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices parseFrom(
java.io.InputStream input,
org.nd4j.shade.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.nd4j.shade.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
// Delimited variants read a varint length prefix before the message body.
public static org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return org.nd4j.shade.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices parseDelimitedFrom(
java.io.InputStream input,
org.nd4j.shade.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.nd4j.shade.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices parseFrom(
org.nd4j.shade.protobuf.CodedInputStream input)
throws java.io.IOException {
return org.nd4j.shade.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices parseFrom(
org.nd4j.shade.protobuf.CodedInputStream input,
org.nd4j.shade.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.nd4j.shade.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
// Builders are created from the default instance so field defaults are shared.
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
// The default instance yields a fresh Builder; otherwise seed it with this message.
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.nd4j.shade.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
*
* Configuration for breaking down a visible GPU into multiple "virtual"
* devices.
*
*
* Protobuf type {@code tensorflow.GPUOptions.Experimental.VirtualDevices}
*/
public static final class Builder extends
org.nd4j.shade.protobuf.GeneratedMessageV3.Builder implements
// @@protoc_insertion_point(builder_implements:tensorflow.GPUOptions.Experimental.VirtualDevices)
org.tensorflow.framework.GPUOptions.Experimental.VirtualDevicesOrBuilder {
// Descriptor and reflective field access for VirtualDevices.Builder.
public static final org.nd4j.shade.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_GPUOptions_Experimental_VirtualDevices_descriptor;
}
@java.lang.Override
protected org.nd4j.shade.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_GPUOptions_Experimental_VirtualDevices_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices.class, org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices.Builder.class);
}
// Construct using org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.nd4j.shade.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
// No message-typed fields here, so there are no nested field builders to create.
private void maybeForceBuilderInitialization() {
if (org.nd4j.shade.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
}
}
@java.lang.Override
public Builder clear() {
super.clear();
// Reset the repeated field to the shared empty list and drop the mutability bit.
memoryLimitMb_ = emptyFloatList();
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
@java.lang.Override
public org.nd4j.shade.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_GPUOptions_Experimental_VirtualDevices_descriptor;
}
@java.lang.Override
public org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices getDefaultInstanceForType() {
return org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices.getDefaultInstance();
}
// Builds the message, throwing if required fields were missing (none here).
@java.lang.Override
public org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices build() {
org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices buildPartial() {
org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices result = new org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices(this);
int from_bitField0_ = bitField0_;
if (((bitField0_ & 0x00000001) != 0)) {
// Freeze the list so the built message can share it with the builder safely.
memoryLimitMb_.makeImmutable();
bitField0_ = (bitField0_ & ~0x00000001);
}
result.memoryLimitMb_ = memoryLimitMb_;
onBuilt();
return result;
}
// The overrides below simply delegate to GeneratedMessageV3.Builder; they are
// emitted by protoc for generated-code compatibility.
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
org.nd4j.shade.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
org.nd4j.shade.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
org.nd4j.shade.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
org.nd4j.shade.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
org.nd4j.shade.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(org.nd4j.shade.protobuf.Message other) {
if (other instanceof org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices) {
return mergeFrom((org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices)other);
} else {
super.mergeFrom(other);
return this;
}
}
// Merges another VirtualDevices message: repeated floats are appended.
public Builder mergeFrom(org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices other) {
if (other == org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices.getDefaultInstance()) return this;
if (!other.memoryLimitMb_.isEmpty()) {
if (memoryLimitMb_.isEmpty()) {
// Adopt the other (immutable) list directly; a copy is made lazily on write.
memoryLimitMb_ = other.memoryLimitMb_;
bitField0_ = (bitField0_ & ~0x00000001);
} else {
ensureMemoryLimitMbIsMutable();
memoryLimitMb_.addAll(other.memoryLimitMb_);
}
onChanged();
}
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
// No required fields, so any builder state is initialized.
return true;
}
@java.lang.Override
public Builder mergeFrom(
org.nd4j.shade.protobuf.CodedInputStream input,
org.nd4j.shade.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.nd4j.shade.protobuf.InvalidProtocolBufferException e) {
// Keep whatever was parsed before the failure, then rethrow the wrapped cause.
parsedMessage = (org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// Backing store for memory_limit_mb; starts as the shared immutable empty list.
private org.nd4j.shade.protobuf.Internal.FloatList memoryLimitMb_ = emptyFloatList();
// Copy-on-write: replace the (possibly shared) list with a mutable copy once.
private void ensureMemoryLimitMbIsMutable() {
if (!((bitField0_ & 0x00000001) != 0)) {
memoryLimitMb_ = mutableCopy(memoryLimitMb_);
bitField0_ |= 0x00000001;
}
}
/**
 * <pre>
 * Per "virtual" device memory limit, in MB. The number of elements in
 * the list is the number of virtual devices to create on the
 * corresponding visible GPU (see "virtual_devices" below).
 * If empty, it will create single virtual device taking all available
 * memory from the device.
 * For the concept of "visible" and "virtual" GPU, see the comments for
 * "visible_device_list" above for more information.
 * </pre>
 *
 * <code>repeated float memory_limit_mb = 1;</code>
 * @return A list containing the memoryLimitMb.
 */
public java.util.List<java.lang.Float>
getMemoryLimitMbList() {
// Fixed raw return type (was "java.util.List"). A mutable list is wrapped so
// callers cannot modify builder state through the returned view.
return ((bitField0_ & 0x00000001) != 0) ?
java.util.Collections.unmodifiableList(memoryLimitMb_) : memoryLimitMb_;
}
/**
* Per "virtual" device memory limit, in MB. The number of elements in
* the list is the number of virtual devices to create on the
* corresponding visible GPU (see "virtual_devices" below).
* If empty, it will create single virtual device taking all available
* memory from the device.
* For the concept of "visible" and "virtual" GPU, see the comments for
* "visible_device_list" above for more information.
*
* repeated float memory_limit_mb = 1;
* @return The count of memoryLimitMb.
*/
public int getMemoryLimitMbCount() {
return memoryLimitMb_.size();
}
/**
* Per "virtual" device memory limit, in MB. The number of elements in
* the list is the number of virtual devices to create on the
* corresponding visible GPU (see "virtual_devices" below).
* If empty, it will create single virtual device taking all available
* memory from the device.
* For the concept of "visible" and "virtual" GPU, see the comments for
* "visible_device_list" above for more information.
*
* repeated float memory_limit_mb = 1;
* @param index The index of the element to return.
* @return The memoryLimitMb at the given index.
*/
public float getMemoryLimitMb(int index) {
// getFloat avoids boxing compared to List.get.
return memoryLimitMb_.getFloat(index);
}
/**
* Per "virtual" device memory limit, in MB. The number of elements in
* the list is the number of virtual devices to create on the
* corresponding visible GPU (see "virtual_devices" below).
* If empty, it will create single virtual device taking all available
* memory from the device.
* For the concept of "visible" and "virtual" GPU, see the comments for
* "visible_device_list" above for more information.
*
* repeated float memory_limit_mb = 1;
* @param index The index to set the value at.
* @param value The memoryLimitMb to set.
* @return This builder for chaining.
*/
public Builder setMemoryLimitMb(
int index, float value) {
// Copy-on-write before mutating, then notify listeners via onChanged().
ensureMemoryLimitMbIsMutable();
memoryLimitMb_.setFloat(index, value);
onChanged();
return this;
}
/**
* Per "virtual" device memory limit, in MB. The number of elements in
* the list is the number of virtual devices to create on the
* corresponding visible GPU (see "virtual_devices" below).
* If empty, it will create single virtual device taking all available
* memory from the device.
* For the concept of "visible" and "virtual" GPU, see the comments for
* "visible_device_list" above for more information.
*
* repeated float memory_limit_mb = 1;
* @param value The memoryLimitMb to add.
* @return This builder for chaining.
*/
public Builder addMemoryLimitMb(float value) {
// Copy-on-write before appending, then notify listeners.
ensureMemoryLimitMbIsMutable();
memoryLimitMb_.addFloat(value);
onChanged();
return this;
}
/**
* Per "virtual" device memory limit, in MB. The number of elements in
* the list is the number of virtual devices to create on the
* corresponding visible GPU (see "virtual_devices" below).
* If empty, it will create single virtual device taking all available
* memory from the device.
* For the concept of "visible" and "virtual" GPU, see the comments for
* "visible_device_list" above for more information.
*
* repeated float memory_limit_mb = 1;
* @param values The memoryLimitMb to add.
* @return This builder for chaining.
*/
public Builder addAllMemoryLimitMb(
java.lang.Iterable<? extends java.lang.Float> values) {
// Restored the stripped type argument: protoc emits
// Iterable<? extends java.lang.Float> here (producer-extends), which
// type-checks callers while remaining erasure-compatible.
ensureMemoryLimitMbIsMutable();
org.nd4j.shade.protobuf.AbstractMessageLite.Builder.addAll(
values, memoryLimitMb_);
onChanged();
return this;
}
/**
* Per "virtual" device memory limit, in MB. The number of elements in
* the list is the number of virtual devices to create on the
* corresponding visible GPU (see "virtual_devices" below).
* If empty, it will create single virtual device taking all available
* memory from the device.
* For the concept of "visible" and "virtual" GPU, see the comments for
* "visible_device_list" above for more information.
*
* repeated float memory_limit_mb = 1;
* @return This builder for chaining.
*/
public Builder clearMemoryLimitMb() {
// Drop the list and clear the mutability bit; no copy needed since the
// shared empty list is immutable.
memoryLimitMb_ = emptyFloatList();
bitField0_ = (bitField0_ & ~0x00000001);
onChanged();
return this;
}
// Covariant overrides that delegate to GeneratedMessageV3.Builder while
// narrowing the return type to this Builder for call chaining.
@java.lang.Override
public final Builder setUnknownFields(
final org.nd4j.shade.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final org.nd4j.shade.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:tensorflow.GPUOptions.Experimental.VirtualDevices)
}
// @@protoc_insertion_point(class_scope:tensorflow.GPUOptions.Experimental.VirtualDevices)
// Singleton default (all-fields-default) instance, created eagerly at
// class-initialization time.
private static final org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices();
}
public static org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices getDefaultInstance() {
return DEFAULT_INSTANCE;
}
// Restored the stripped <VirtualDevices> type arguments on Parser and
// AbstractParser, matching the signatures protoc generates.
private static final org.nd4j.shade.protobuf.Parser<VirtualDevices>
PARSER = new org.nd4j.shade.protobuf.AbstractParser<VirtualDevices>() {
@java.lang.Override
public VirtualDevices parsePartialFrom(
org.nd4j.shade.protobuf.CodedInputStream input,
org.nd4j.shade.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.nd4j.shade.protobuf.InvalidProtocolBufferException {
// Each parse constructs a fresh message directly from the stream.
return new VirtualDevices(input, extensionRegistry);
}
};
/** @return The shared parser for VirtualDevices messages. */
public static org.nd4j.shade.protobuf.Parser<VirtualDevices> parser() {
return PARSER;
}
@java.lang.Override
public org.nd4j.shade.protobuf.Parser<VirtualDevices> getParserForType() {
// Restored the stripped <VirtualDevices> type argument on the return type.
return PARSER;
}
@java.lang.Override
public org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices getDefaultInstanceForType() {
// Instance-level accessor for the shared default instance.
return DEFAULT_INSTANCE;
}
}
public static final int VIRTUAL_DEVICES_FIELD_NUMBER = 1;
// Restored the stripped element type: the raw java.util.List made
// virtualDevices_.get(index) return Object, which does not compile where a
// VirtualDevicesOrBuilder is expected (see getVirtualDevicesOrBuilder).
private java.util.List<org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices> virtualDevices_;
/**
* The multi virtual device settings. If empty (not set), it will create
* single virtual device on each visible GPU, according to the settings
* in "visible_device_list" above. Otherwise, the number of elements in the
* list must be the same as the number of visible GPUs (after
* "visible_device_list" filtering if it is set), and the string represented
* device names (e.g. /device:GPU:<id>) will refer to the virtual
* devices and have the <id> field assigned sequentially starting from 0,
* according to the order they appear in this list and the "memory_limit"
* list inside each element. For example,
* visible_device_list = "1,0"
* virtual_devices { memory_limit: 1GB memory_limit: 2GB }
* virtual_devices {}
* will create three virtual devices as:
* /device:GPU:0 -> visible GPU 1 with 1GB memory
* /device:GPU:1 -> visible GPU 1 with 2GB memory
* /device:GPU:2 -> visible GPU 0 with all available memory
* NOTE:
* 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
* at the same time.
* 2. Currently this setting is per-process, not per-session. Using
* different settings in different sessions within same process will
* result in undefined behavior.
*
* repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
*/
@java.lang.Override
public int getVirtualDevicesCount() {
// Number of entries in the repeated virtual_devices field.
return virtualDevices_.size();
}
/**
* The multi virtual device settings. If empty (not set), it will create
* single virtual device on each visible GPU, according to the settings
* in "visible_device_list" above. Otherwise, the number of elements in the
* list must be the same as the number of visible GPUs (after
* "visible_device_list" filtering if it is set), and the string represented
* device names (e.g. /device:GPU:<id>) will refer to the virtual
* devices and have the <id> field assigned sequentially starting from 0,
* according to the order they appear in this list and the "memory_limit"
* list inside each element. For example,
* visible_device_list = "1,0"
* virtual_devices { memory_limit: 1GB memory_limit: 2GB }
* virtual_devices {}
* will create three virtual devices as:
* /device:GPU:0 -> visible GPU 1 with 1GB memory
* /device:GPU:1 -> visible GPU 1 with 2GB memory
* /device:GPU:2 -> visible GPU 0 with all available memory
* NOTE:
* 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
* at the same time.
* 2. Currently this setting is per-process, not per-session. Using
* different settings in different sessions within same process will
* result in undefined behavior.
*
* repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
*/
@java.lang.Override
public org.tensorflow.framework.GPUOptions.Experimental.VirtualDevicesOrBuilder getVirtualDevicesOrBuilder(
int index) {
// On an immutable message the element itself serves as its OrBuilder view.
return virtualDevices_.get(index);
}
public static final int USE_UNIFIED_MEMORY_FIELD_NUMBER = 2;
private boolean useUnifiedMemory_;
/**
* If true, uses CUDA unified memory for memory allocations. If
* per_process_gpu_memory_fraction option is greater than 1.0, then unified
* memory is used regardless of the value for this field. See comments for
* per_process_gpu_memory_fraction field for more details and requirements
* of the unified memory. This option is useful to oversubscribe memory if
* multiple processes are sharing a single GPU while individually using less
* than 1.0 per process memory fraction.
*
* bool use_unified_memory = 2;
* @return The useUnifiedMemory.
*/
@java.lang.Override
public boolean getUseUnifiedMemory() {
return useUnifiedMemory_;
}
// Memoized tri-state: -1 = not computed, 0 = not initialized, 1 = initialized.
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
// No required fields, so the answer is always true once computed.
memoizedIsInitialized = 1;
return true;
}
/**
* Serializes this message to {@code output}: repeated virtual_devices as
* field 1, use_unified_memory as field 2 (only when non-default), then any
* unknown fields.
*/
@java.lang.Override
public void writeTo(org.nd4j.shade.protobuf.CodedOutputStream output)
throws java.io.IOException {
for (int i = 0; i < virtualDevices_.size(); i++) {
output.writeMessage(1, virtualDevices_.get(i));
}
if (useUnifiedMemory_ != false) {
output.writeBool(2, useUnifiedMemory_);
}
unknownFields.writeTo(output);
}
/**
* Computes (and memoizes) the serialized byte size of this message,
* mirroring the fields written by {@link #writeTo}.
*/
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
// -1 means "not yet computed"; any other value is the cached size.
if (size != -1) return size;
size = 0;
for (int i = 0; i < virtualDevices_.size(); i++) {
size += org.nd4j.shade.protobuf.CodedOutputStream
.computeMessageSize(1, virtualDevices_.get(i));
}
if (useUnifiedMemory_ != false) {
size += org.nd4j.shade.protobuf.CodedOutputStream
.computeBoolSize(2, useUnifiedMemory_);
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
/**
* Field-by-field equality: virtual_devices list, use_unified_memory, and
* unknown fields must all match.
*/
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.tensorflow.framework.GPUOptions.Experimental)) {
return super.equals(obj);
}
org.tensorflow.framework.GPUOptions.Experimental other = (org.tensorflow.framework.GPUOptions.Experimental) obj;
if (!getVirtualDevicesList()
.equals(other.getVirtualDevicesList())) return false;
if (getUseUnifiedMemory()
!= other.getUseUnifiedMemory()) return false;
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
/**
* Memoized hash over the descriptor, each set field (tagged with its field
* number), and the unknown fields; consistent with {@link #equals}.
*/
@java.lang.Override
public int hashCode() {
// 0 doubles as the "not yet computed" sentinel.
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (getVirtualDevicesCount() > 0) {
hash = (37 * hash) + VIRTUAL_DEVICES_FIELD_NUMBER;
hash = (53 * hash) + getVirtualDevicesList().hashCode();
}
hash = (37 * hash) + USE_UNIFIED_MEMORY_FIELD_NUMBER;
hash = (53 * hash) + org.nd4j.shade.protobuf.Internal.hashBoolean(
getUseUnifiedMemory());
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
// ---------------------------------------------------------------------------
// Static parse entry points for GPUOptions.Experimental. The in-memory
// overloads (ByteBuffer/ByteString/byte[]) delegate to PARSER directly; the
// stream overloads go through GeneratedMessageV3.parseWithIOException /
// parseDelimitedWithIOException, which translate protocol errors into
// IOException-compatible failures.
// ---------------------------------------------------------------------------
public static org.tensorflow.framework.GPUOptions.Experimental parseFrom(
java.nio.ByteBuffer data)
throws org.nd4j.shade.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.tensorflow.framework.GPUOptions.Experimental parseFrom(
java.nio.ByteBuffer data,
org.nd4j.shade.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.nd4j.shade.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.tensorflow.framework.GPUOptions.Experimental parseFrom(
org.nd4j.shade.protobuf.ByteString data)
throws org.nd4j.shade.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.tensorflow.framework.GPUOptions.Experimental parseFrom(
org.nd4j.shade.protobuf.ByteString data,
org.nd4j.shade.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.nd4j.shade.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.tensorflow.framework.GPUOptions.Experimental parseFrom(byte[] data)
throws org.nd4j.shade.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.tensorflow.framework.GPUOptions.Experimental parseFrom(
byte[] data,
org.nd4j.shade.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.nd4j.shade.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.tensorflow.framework.GPUOptions.Experimental parseFrom(java.io.InputStream input)
throws java.io.IOException {
return org.nd4j.shade.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.tensorflow.framework.GPUOptions.Experimental parseFrom(
java.io.InputStream input,
org.nd4j.shade.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.nd4j.shade.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
// Delimited variants read a length-prefixed message, allowing several
// messages to be read back-to-back from one stream.
public static org.tensorflow.framework.GPUOptions.Experimental parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return org.nd4j.shade.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.tensorflow.framework.GPUOptions.Experimental parseDelimitedFrom(
java.io.InputStream input,
org.nd4j.shade.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.nd4j.shade.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.tensorflow.framework.GPUOptions.Experimental parseFrom(
org.nd4j.shade.protobuf.CodedInputStream input)
throws java.io.IOException {
return org.nd4j.shade.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.tensorflow.framework.GPUOptions.Experimental parseFrom(
org.nd4j.shade.protobuf.CodedInputStream input,
org.nd4j.shade.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.nd4j.shade.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
// Builder factory methods for GPUOptions.Experimental.
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.tensorflow.framework.GPUOptions.Experimental prototype) {
// Start from an empty builder and merge in the prototype's fields.
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
// The default instance yields a fresh empty builder; otherwise copy this
// message's fields into the new builder.
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.nd4j.shade.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code tensorflow.GPUOptions.Experimental}
*/
public static final class Builder extends
org.nd4j.shade.protobuf.GeneratedMessageV3.Builder implements
// @@protoc_insertion_point(builder_implements:tensorflow.GPUOptions.Experimental)
org.tensorflow.framework.GPUOptions.ExperimentalOrBuilder {
/** @return The protobuf descriptor for tensorflow.GPUOptions.Experimental. */
public static final org.nd4j.shade.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_GPUOptions_Experimental_descriptor;
}
@java.lang.Override
protected org.nd4j.shade.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
// Lazily binds the accessor table to the message and builder classes.
return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_GPUOptions_Experimental_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.tensorflow.framework.GPUOptions.Experimental.class, org.tensorflow.framework.GPUOptions.Experimental.Builder.class);
}
// Construct using org.tensorflow.framework.GPUOptions.Experimental.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
org.nd4j.shade.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
// Eagerly create the nested-message field builder only when the runtime
// is configured to always use field builders.
if (org.nd4j.shade.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
getVirtualDevicesFieldBuilder();
}
}
/** Resets every field of this builder to its default value. */
@java.lang.Override
public Builder clear() {
super.clear();
if (virtualDevicesBuilder_ == null) {
// Plain-list mode: drop the list and clear its mutability bit.
virtualDevices_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000001);
} else {
// Field-builder mode: let the builder clear its own state.
virtualDevicesBuilder_.clear();
}
useUnifiedMemory_ = false;
return this;
}
@java.lang.Override
public org.nd4j.shade.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_GPUOptions_Experimental_descriptor;
}
@java.lang.Override
public org.tensorflow.framework.GPUOptions.Experimental getDefaultInstanceForType() {
return org.tensorflow.framework.GPUOptions.Experimental.getDefaultInstance();
}
/**
* Builds the message, throwing if it is not fully initialized
* (Experimental has no required fields, so this cannot fail in practice).
*/
@java.lang.Override
public org.tensorflow.framework.GPUOptions.Experimental build() {
org.tensorflow.framework.GPUOptions.Experimental result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
/**
* Builds the message without an initialization check, transferring this
* builder's field values into the new instance.
*/
@java.lang.Override
public org.tensorflow.framework.GPUOptions.Experimental buildPartial() {
org.tensorflow.framework.GPUOptions.Experimental result = new org.tensorflow.framework.GPUOptions.Experimental(this);
int from_bitField0_ = bitField0_;
if (virtualDevicesBuilder_ == null) {
// Plain-list mode: freeze the list before sharing it with the message,
// and clear the mutability bit so later builder edits copy first.
if (((bitField0_ & 0x00000001) != 0)) {
virtualDevices_ = java.util.Collections.unmodifiableList(virtualDevices_);
bitField0_ = (bitField0_ & ~0x00000001);
}
result.virtualDevices_ = virtualDevices_;
} else {
result.virtualDevices_ = virtualDevicesBuilder_.build();
}
result.useUnifiedMemory_ = useUnifiedMemory_;
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
// The overrides below delegate to GeneratedMessageV3.Builder, narrowing the
// return type to this Builder for call chaining.
@java.lang.Override
public Builder setField(
org.nd4j.shade.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
org.nd4j.shade.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
org.nd4j.shade.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
org.nd4j.shade.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
org.nd4j.shade.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(org.nd4j.shade.protobuf.Message other) {
// Dispatch to the typed merge when possible; otherwise fall back to the
// reflective field-by-field merge in the superclass.
if (other instanceof org.tensorflow.framework.GPUOptions.Experimental) {
return mergeFrom((org.tensorflow.framework.GPUOptions.Experimental)other);
} else {
super.mergeFrom(other);
return this;
}
}
/**
* Merges {@code other} into this builder: repeated virtual_devices are
* appended after ours, use_unified_memory is overwritten only when set in
* {@code other}, and unknown fields are merged.
*/
public Builder mergeFrom(org.tensorflow.framework.GPUOptions.Experimental other) {
// Merging the default (empty) instance is a no-op.
if (other == org.tensorflow.framework.GPUOptions.Experimental.getDefaultInstance()) return this;
if (virtualDevicesBuilder_ == null) {
// Plain-list mode.
if (!other.virtualDevices_.isEmpty()) {
if (virtualDevices_.isEmpty()) {
// Share the other message's (immutable) list; clear the mutability
// bit so any later edit copies first.
virtualDevices_ = other.virtualDevices_;
bitField0_ = (bitField0_ & ~0x00000001);
} else {
ensureVirtualDevicesIsMutable();
virtualDevices_.addAll(other.virtualDevices_);
}
onChanged();
}
} else {
// Field-builder mode.
if (!other.virtualDevices_.isEmpty()) {
if (virtualDevicesBuilder_.isEmpty()) {
// Drop the empty builder and adopt the other list directly,
// re-creating the builder only if the runtime always uses builders.
virtualDevicesBuilder_.dispose();
virtualDevicesBuilder_ = null;
virtualDevices_ = other.virtualDevices_;
bitField0_ = (bitField0_ & ~0x00000001);
virtualDevicesBuilder_ =
org.nd4j.shade.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
getVirtualDevicesFieldBuilder() : null;
} else {
virtualDevicesBuilder_.addAllMessages(other.virtualDevices_);
}
}
}
if (other.getUseUnifiedMemory() != false) {
setUseUnifiedMemory(other.getUseUnifiedMemory());
}
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
// Experimental has no required fields, so any builder state is valid.
return true;
}
@java.lang.Override
public Builder mergeFrom(
org.nd4j.shade.protobuf.CodedInputStream input,
org.nd4j.shade.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
// Parse one Experimental message and merge it into this builder; on a
// parse failure any partially-parsed data is still merged (finally block)
// before the exception is rethrown.
org.tensorflow.framework.GPUOptions.Experimental parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.nd4j.shade.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.tensorflow.framework.GPUOptions.Experimental) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
// Bit 0x00000001 of bitField0_ marks virtualDevices_ as a private mutable
// copy safe to edit in place.
private int bitField0_;
// Restored the stripped element type on the list field and on the raw
// "new java.util.ArrayList(...)" below; protoc generates both with
// <...VirtualDevices> type arguments.
private java.util.List<org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices> virtualDevices_ =
java.util.Collections.emptyList();
// Copy-on-write helper: makes a private mutable copy of the list before the
// first in-place mutation.
private void ensureVirtualDevicesIsMutable() {
if (!((bitField0_ & 0x00000001) != 0)) {
virtualDevices_ = new java.util.ArrayList<org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices>(virtualDevices_);
bitField0_ |= 0x00000001;
}
}
// Lazily-created field builder; when non-null it owns the repeated field
// state instead of virtualDevices_.
private org.nd4j.shade.protobuf.RepeatedFieldBuilderV3<
org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices, org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices.Builder, org.tensorflow.framework.GPUOptions.Experimental.VirtualDevicesOrBuilder> virtualDevicesBuilder_;
/**
* The multi virtual device settings. If empty (not set), it will create
* single virtual device on each visible GPU, according to the settings
* in "visible_device_list" above. Otherwise, the number of elements in the
* list must be the same as the number of visible GPUs (after
* "visible_device_list" filtering if it is set), and the string represented
* device names (e.g. /device:GPU:<id>) will refer to the virtual
* devices and have the <id> field assigned sequentially starting from 0,
* according to the order they appear in this list and the "memory_limit"
* list inside each element. For example,
* visible_device_list = "1,0"
* virtual_devices { memory_limit: 1GB memory_limit: 2GB }
* virtual_devices {}
* will create three virtual devices as:
* /device:GPU:0 -> visible GPU 1 with 1GB memory
* /device:GPU:1 -> visible GPU 1 with 2GB memory
* /device:GPU:2 -> visible GPU 0 with all available memory
* NOTE:
* 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
* at the same time.
* 2. Currently this setting is per-process, not per-session. Using
* different settings in different sessions within same process will
* result in undefined behavior.
*
* repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
*/
public int getVirtualDevicesCount() {
// Counts from the field builder when one is active, else the plain list.
if (virtualDevicesBuilder_ == null) {
return virtualDevices_.size();
} else {
return virtualDevicesBuilder_.getCount();
}
}
/**
* The multi virtual device settings. If empty (not set), it will create
* single virtual device on each visible GPU, according to the settings
* in "visible_device_list" above. Otherwise, the number of elements in the
* list must be the same as the number of visible GPUs (after
* "visible_device_list" filtering if it is set), and the string represented
* device names (e.g. /device:GPU:<id>) will refer to the virtual
* devices and have the <id> field assigned sequentially starting from 0,
* according to the order they appear in this list and the "memory_limit"
* list inside each element. For example,
* visible_device_list = "1,0"
* virtual_devices { memory_limit: 1GB memory_limit: 2GB }
* virtual_devices {}
* will create three virtual devices as:
* /device:GPU:0 -> visible GPU 1 with 1GB memory
* /device:GPU:1 -> visible GPU 1 with 2GB memory
* /device:GPU:2 -> visible GPU 0 with all available memory
* NOTE:
* 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
* at the same time.
* 2. Currently this setting is per-process, not per-session. Using
* different settings in different sessions within same process will
* result in undefined behavior.
*
* repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
*/
public Builder setVirtualDevices(
int index, org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices value) {
if (virtualDevicesBuilder_ == null) {
// Plain-list mode: reject null, copy-on-write, then replace in place.
if (value == null) {
throw new NullPointerException();
}
ensureVirtualDevicesIsMutable();
virtualDevices_.set(index, value);
onChanged();
} else {
// Field-builder mode: delegate (the builder performs its own checks).
virtualDevicesBuilder_.setMessage(index, value);
}
return this;
}
/**
*
* The multi virtual device settings. If empty (not set), it will create
* single virtual device on each visible GPU, according to the settings
* in "visible_device_list" above. Otherwise, the number of elements in the
* list must be the same as the number of visible GPUs (after
* "visible_device_list" filtering if it is set), and the string represented
* device names (e.g. /device:GPU:<id>) will refer to the virtual
* devices and have the <id> field assigned sequentially starting from 0,
* according to the order they appear in this list and the "memory_limit"
* list inside each element. For example,
* visible_device_list = "1,0"
* virtual_devices { memory_limit: 1GB memory_limit: 2GB }
* virtual_devices {}
* will create three virtual devices as:
* /device:GPU:0 -> visible GPU 1 with 1GB memory
* /device:GPU:1 -> visible GPU 1 with 2GB memory
* /device:GPU:2 -> visible GPU 0 with all available memory
* NOTE:
* 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
* at the same time.
* 2. Currently this setting is per-process, not per-session. Using
* different settings in different sessions within same process will
* result in undefined behavior.
*
* The multi virtual device settings. If empty (not set), it will create
* single virtual device on each visible GPU, according to the settings
* in "visible_device_list" above. Otherwise, the number of elements in the
* list must be the same as the number of visible GPUs (after
* "visible_device_list" filtering if it is set), and the string represented
* device names (e.g. /device:GPU:<id>) will refer to the virtual
* devices and have the <id> field assigned sequentially starting from 0,
* according to the order they appear in this list and the "memory_limit"
* list inside each element. For example,
* visible_device_list = "1,0"
* virtual_devices { memory_limit: 1GB memory_limit: 2GB }
* virtual_devices {}
* will create three virtual devices as:
* /device:GPU:0 -> visible GPU 1 with 1GB memory
* /device:GPU:1 -> visible GPU 1 with 2GB memory
* /device:GPU:2 -> visible GPU 0 with all available memory
* NOTE:
* 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
* at the same time.
* 2. Currently this setting is per-process, not per-session. Using
* different settings in different sessions within same process will
* result in undefined behavior.
*
*
* repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
*/
public Builder addVirtualDevices(org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices value) {
  if (virtualDevicesBuilder_ != null) {
    // A repeated-field builder has been materialized: delegate to it.
    virtualDevicesBuilder_.addMessage(value);
    return this;
  }
  if (value == null) {
    throw new NullPointerException();
  }
  ensureVirtualDevicesIsMutable();
  virtualDevices_.add(value);
  onChanged();
  return this;
}
/**
 * Inserts a {@code VirtualDevices} entry at {@code index} in the
 * {@code virtual_devices} list (multi virtual device settings; see the
 * field-level comment on {@code virtual_devices} for semantics).
 *
 * repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
 */
public Builder addVirtualDevices(
    int index, org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices value) {
  if (virtualDevicesBuilder_ != null) {
    // A repeated-field builder has been materialized: delegate to it.
    virtualDevicesBuilder_.addMessage(index, value);
    return this;
  }
  if (value == null) {
    throw new NullPointerException();
  }
  ensureVirtualDevicesIsMutable();
  virtualDevices_.add(index, value);
  onChanged();
  return this;
}
/**
 * Returns a builder view of the {@code virtual_devices} element at
 * {@code index} (forces the repeated field into builder mode).
 *
 * The multi virtual device settings. If empty (not set), it will create
 * single virtual device on each visible GPU, according to the settings
 * in "visible_device_list" above. Otherwise, the number of elements in the
 * list must be the same as the number of visible GPUs (after
 * "visible_device_list" filtering if it is set), and the string represented
 * device names (e.g. /device:GPU:&lt;id&gt;) will refer to the virtual
 * devices and have the &lt;id&gt; field assigned sequentially starting from 0,
 * according to the order they appear in this list and the "memory_limit"
 * list inside each element. For example,
 *   visible_device_list = "1,0"
 *   virtual_devices { memory_limit: 1GB memory_limit: 2GB }
 *   virtual_devices {}
 * will create three virtual devices as:
 *   /device:GPU:0 -> visible GPU 1 with 1GB memory
 *   /device:GPU:1 -> visible GPU 1 with 2GB memory
 *   /device:GPU:2 -> visible GPU 0 with all available memory
 * NOTE:
 * 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
 *    at the same time.
 * 2. Currently this setting is per-process, not per-session. Using
 *    different settings in different sessions within same process will
 *    result in undefined behavior.
 *
 * repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
 */
public org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices.Builder getVirtualDevicesBuilder(
    int index) {
  return getVirtualDevicesFieldBuilder().getBuilder(index);
}
/**
 * Returns the {@code virtual_devices} element at {@code index}, either as
 * the already-built message or as its live builder, whichever currently
 * backs the repeated field.
 *
 * repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
 */
public org.tensorflow.framework.GPUOptions.Experimental.VirtualDevicesOrBuilder getVirtualDevicesOrBuilder(
    int index) {
  if (virtualDevicesBuilder_ != null) {
    return virtualDevicesBuilder_.getMessageOrBuilder(index);
  }
  return virtualDevices_.get(index);
}
/**
 * Sets {@code use_unified_memory}.
 *
 * If true, uses CUDA unified memory for memory allocations. If
 * per_process_gpu_memory_fraction option is greater than 1.0, then unified
 * memory is used regardless of the value for this field. See comments for
 * per_process_gpu_memory_fraction field for more details and requirements
 * of the unified memory. This option is useful to oversubscribe memory if
 * multiple processes are sharing a single GPU while individually using less
 * than 1.0 per process memory fraction.
 *
 * bool use_unified_memory = 2;
 * @param value The useUnifiedMemory to set.
 * @return This builder for chaining.
 */
public Builder setUseUnifiedMemory(boolean value) {
  useUnifiedMemory_ = value;
  onChanged();
  return this;
}
/**
 * Resets {@code use_unified_memory} to its proto3 default ({@code false}).
 *
 * bool use_unified_memory = 2;
 * @return This builder for chaining.
 */
public Builder clearUseUnifiedMemory() {
  this.useUnifiedMemory_ = false;
  onChanged();
  return this;
}
@java.lang.Override
public final Builder setUnknownFields(
    final org.nd4j.shade.protobuf.UnknownFieldSet unknownFields) {
  // Replaces (does not merge) any fields not defined in the schema.
  return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
    final org.nd4j.shade.protobuf.UnknownFieldSet unknownFields) {
  // Merges unrecognized wire-format fields into this builder's set.
  return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:tensorflow.GPUOptions.Experimental)
}
// @@protoc_insertion_point(class_scope:tensorflow.GPUOptions.Experimental)
private static final org.tensorflow.framework.GPUOptions.Experimental DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.tensorflow.framework.GPUOptions.Experimental();
}
public static org.tensorflow.framework.GPUOptions.Experimental getDefaultInstance() {
return DEFAULT_INSTANCE;
}
/**
 * Wire-format parser for {@code Experimental} messages. Parameterized with
 * the message type (the original used raw {@code Parser}/{@code AbstractParser},
 * which forces unchecked conversions on every use).
 */
private static final org.nd4j.shade.protobuf.Parser<Experimental>
    PARSER = new org.nd4j.shade.protobuf.AbstractParser<Experimental>() {
  @java.lang.Override
  public Experimental parsePartialFrom(
      org.nd4j.shade.protobuf.CodedInputStream input,
      org.nd4j.shade.protobuf.ExtensionRegistryLite extensionRegistry)
      throws org.nd4j.shade.protobuf.InvalidProtocolBufferException {
    return new Experimental(input, extensionRegistry);
  }
};
/**
 * Returns the parser for {@code Experimental} messages. The return type is
 * parameterized (was raw {@code Parser}) so callers need no unchecked cast;
 * this widening is source- and binary-compatible for existing callers.
 */
public static org.nd4j.shade.protobuf.Parser<Experimental> parser() {
  return PARSER;
}
@java.lang.Override
public org.nd4j.shade.protobuf.Parser<Experimental> getParserForType() {
  // Covariant, parameterized override (was raw Parser) — matches the
  // shape protoc itself generates for message classes.
  return PARSER;
}
@java.lang.Override
public org.tensorflow.framework.GPUOptions.Experimental getDefaultInstanceForType() {
  // Same singleton as getDefaultInstance(); instance-level accessor required
  // by the MessageLite contract.
  return DEFAULT_INSTANCE;
}
}
public static final int PER_PROCESS_GPU_MEMORY_FRACTION_FIELD_NUMBER = 1;
// Backing store for the per_process_gpu_memory_fraction proto field.
private double perProcessGpuMemoryFraction_;
/**
 * Fraction of the available GPU memory to allocate for each process.
 * 1 means to allocate all of the GPU memory, 0.5 means the process
 * allocates up to ~50% of the available GPU memory.
 * GPU memory is pre-allocated unless the allow_growth option is enabled.
 * If greater than 1.0, uses CUDA unified memory to potentially oversubscribe
 * the amount of memory available on the GPU device by using host memory as a
 * swap space. Accessing memory not available on the device will be
 * significantly slower as that would require memory transfer between the host
 * and the device. Options to reduce the memory requirement should be
 * considered before enabling this option as this may come with a negative
 * performance impact. Oversubscription using the unified memory requires
 * Pascal class or newer GPUs and it is currently only supported on the Linux
 * operating system. See
 * https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#um-requirements
 * for the detailed requirements.
 *
 * double per_process_gpu_memory_fraction = 1;
 * @return The perProcessGpuMemoryFraction.
 */
@java.lang.Override
public double getPerProcessGpuMemoryFraction() {
  return perProcessGpuMemoryFraction_;
}
public static final int ALLOW_GROWTH_FIELD_NUMBER = 4;
// Backing store for the allow_growth proto field.
private boolean allowGrowth_;
/**
 * If true, the allocator does not pre-allocate the entire specified
 * GPU memory region, instead starting small and growing as needed.
 *
 * bool allow_growth = 4;
 * @return The allowGrowth.
 */
@java.lang.Override
public boolean getAllowGrowth() {
  return allowGrowth_;
}
public static final int ALLOCATOR_TYPE_FIELD_NUMBER = 2;
// Holds either a String or a cached ByteString; volatile so the cached
// encoding is safely published across threads.
private volatile java.lang.Object allocatorType_;
/**
 * The type of GPU allocation strategy to use.
 * Allowed values:
 * "": The empty string (default) uses a system-chosen default
 *     which may change over time.
 * "BFC": A "Best-fit with coalescing" algorithm, simplified from a
 *        version of dlmalloc.
 *
 * string allocator_type = 2;
 * @return The bytes for allocatorType.
 */
@java.lang.Override
public org.nd4j.shade.protobuf.ByteString
    getAllocatorTypeBytes() {
  java.lang.Object ref = allocatorType_;
  if (ref instanceof java.lang.String) {
    org.nd4j.shade.protobuf.ByteString b =
        org.nd4j.shade.protobuf.ByteString.copyFromUtf8(
            (java.lang.String) ref);
    // Cache the UTF-8 encoding so repeated calls do not re-encode.
    allocatorType_ = b;
    return b;
  } else {
    return (org.nd4j.shade.protobuf.ByteString) ref;
  }
}
public static final int DEFERRED_DELETION_BYTES_FIELD_NUMBER = 3;
// Backing store for the deferred_deletion_bytes proto field.
private long deferredDeletionBytes_;
/**
 * Delay deletion of up to this many bytes to reduce the number of
 * interactions with gpu driver code. If 0, the system chooses
 * a reasonable default (several MBs).
 *
 * int64 deferred_deletion_bytes = 3;
 * @return The deferredDeletionBytes.
 */
@java.lang.Override
public long getDeferredDeletionBytes() {
  return deferredDeletionBytes_;
}
public static final int VISIBLE_DEVICE_LIST_FIELD_NUMBER = 5;
// Holds either a String or a cached ByteString; volatile so the cached
// encoding is safely published across threads.
private volatile java.lang.Object visibleDeviceList_;
/**
 * A comma-separated list of GPU ids that determines the 'visible'
 * to 'virtual' mapping of GPU devices. For example, if TensorFlow
 * can see 8 GPU devices in the process, and one wanted to map
 * visible GPU devices 5 and 3 as "/device:GPU:0", and "/device:GPU:1",
 * then one would specify this field as "5,3". This field is similar in
 * spirit to the CUDA_VISIBLE_DEVICES environment variable, except
 * it applies to the visible GPU devices in the process.
 * NOTE:
 * 1. The GPU driver provides the process with the visible GPUs
 *    in an order which is not guaranteed to have any correlation to
 *    the *physical* GPU id in the machine. This field is used for
 *    remapping "visible" to "virtual", which means this operates only
 *    after the process starts. Users are required to use vendor
 *    specific mechanisms (e.g., CUDA_VISIBLE_DEVICES) to control the
 *    physical to visible device mapping prior to invoking TensorFlow.
 * 2. In the code, the ids in this list are also called "CUDA GPU id"s,
 *    and the 'virtual' ids of GPU devices (i.e. the ids in the device
 *    name "/device:GPU:&lt;id&gt;") are also called "TF GPU id"s. Please
 *    refer to third_party/tensorflow/core/common_runtime/gpu/gpu_id.h
 *    for more information.
 *
 * string visible_device_list = 5;
 * @return The bytes for visibleDeviceList.
 */
@java.lang.Override
public org.nd4j.shade.protobuf.ByteString
    getVisibleDeviceListBytes() {
  java.lang.Object ref = visibleDeviceList_;
  if (ref instanceof java.lang.String) {
    org.nd4j.shade.protobuf.ByteString b =
        org.nd4j.shade.protobuf.ByteString.copyFromUtf8(
            (java.lang.String) ref);
    // Cache the UTF-8 encoding so repeated calls do not re-encode.
    visibleDeviceList_ = b;
    return b;
  } else {
    return (org.nd4j.shade.protobuf.ByteString) ref;
  }
}
public static final int POLLING_ACTIVE_DELAY_USECS_FIELD_NUMBER = 6;
// Backing store for the polling_active_delay_usecs proto field.
private int pollingActiveDelayUsecs_;
/**
 * In the event polling loop sleep this many microseconds between
 * PollEvents calls, when the queue is not empty. If value is not
 * set or set to 0, gets set to a non-zero default.
 *
 * int32 polling_active_delay_usecs = 6;
 * @return The pollingActiveDelayUsecs.
 */
@java.lang.Override
public int getPollingActiveDelayUsecs() {
  return pollingActiveDelayUsecs_;
}
public static final int POLLING_INACTIVE_DELAY_MSECS_FIELD_NUMBER = 7;
// Backing store for the polling_inactive_delay_msecs proto field.
private int pollingInactiveDelayMsecs_;
/**
 * This field is deprecated and ignored.
 *
 * int32 polling_inactive_delay_msecs = 7;
 * @return The pollingInactiveDelayMsecs.
 */
@java.lang.Override
public int getPollingInactiveDelayMsecs() {
  return pollingInactiveDelayMsecs_;
}
public static final int FORCE_GPU_COMPATIBLE_FIELD_NUMBER = 8;
// Backing store for the force_gpu_compatible proto field.
private boolean forceGpuCompatible_;
/**
 * Force all tensors to be gpu_compatible. On a GPU-enabled TensorFlow,
 * enabling this option forces all CPU tensors to be allocated with Cuda
 * pinned memory. Normally, TensorFlow will infer which tensors should be
 * allocated as the pinned memory. But in case where the inference is
 * incomplete, this option can significantly speed up the cross-device memory
 * copy performance as long as it fits the memory.
 * Note that this option is not something that should be
 * enabled by default for unknown or very large models, since all Cuda pinned
 * memory is unpageable, having too much pinned memory might negatively impact
 * the overall host system performance.
 *
 * bool force_gpu_compatible = 8;
 * @return The forceGpuCompatible.
 */
@java.lang.Override
public boolean getForceGpuCompatible() {
  return forceGpuCompatible_;
}
public static final int EXPERIMENTAL_FIELD_NUMBER = 9;
// Backing store for the experimental submessage; null means "not set".
private org.tensorflow.framework.GPUOptions.Experimental experimental_;
/**
 * Everything inside experimental is subject to change and is not subject
 * to API stability guarantees in
 * https://www.tensorflow.org/guide/version_compat.
 *
 * .tensorflow.GPUOptions.Experimental experimental = 9;
 * @return Whether the experimental field is set.
 */
@java.lang.Override
public boolean hasExperimental() {
  return experimental_ != null;
}
/**
 * Sets {@code per_process_gpu_memory_fraction}.
 *
 * Fraction of the available GPU memory to allocate for each process.
 * 1 means to allocate all of the GPU memory, 0.5 means the process
 * allocates up to ~50% of the available GPU memory.
 * GPU memory is pre-allocated unless the allow_growth option is enabled.
 * If greater than 1.0, uses CUDA unified memory to potentially oversubscribe
 * the amount of memory available on the GPU device by using host memory as a
 * swap space. Accessing memory not available on the device will be
 * significantly slower as that would require memory transfer between the host
 * and the device. Options to reduce the memory requirement should be
 * considered before enabling this option as this may come with a negative
 * performance impact. Oversubscription using the unified memory requires
 * Pascal class or newer GPUs and it is currently only supported on the Linux
 * operating system. See
 * https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#um-requirements
 * for the detailed requirements.
 *
 * double per_process_gpu_memory_fraction = 1;
 * @param value The perProcessGpuMemoryFraction to set.
 * @return This builder for chaining.
 */
public Builder setPerProcessGpuMemoryFraction(double value) {
  perProcessGpuMemoryFraction_ = value;
  onChanged();
  return this;
}
/**
 * Resets {@code per_process_gpu_memory_fraction} to its proto3 default (0).
 *
 * double per_process_gpu_memory_fraction = 1;
 * @return This builder for chaining.
 */
public Builder clearPerProcessGpuMemoryFraction() {
  this.perProcessGpuMemoryFraction_ = 0D;
  onChanged();
  return this;
}
// Builder-local backing store for the allow_growth proto field (bool, tag 4).
private boolean allowGrowth_ ;
/**
 * Returns {@code allocator_type} as UTF-8 bytes, caching the encoding.
 *
 * The type of GPU allocation strategy to use.
 * Allowed values:
 * "": The empty string (default) uses a system-chosen default
 *     which may change over time.
 * "BFC": A "Best-fit with coalescing" algorithm, simplified from a
 *        version of dlmalloc.
 *
 * string allocator_type = 2;
 * @return The bytes for allocatorType.
 */
public org.nd4j.shade.protobuf.ByteString
    getAllocatorTypeBytes() {
  java.lang.Object ref = allocatorType_;
  // Fully-qualified java.lang.String for consistency with the message-side
  // accessor (the original used a bare "String" here only).
  if (ref instanceof java.lang.String) {
    org.nd4j.shade.protobuf.ByteString b =
        org.nd4j.shade.protobuf.ByteString.copyFromUtf8(
            (java.lang.String) ref);
    // Cache the UTF-8 encoding so repeated calls do not re-encode.
    allocatorType_ = b;
    return b;
  } else {
    return (org.nd4j.shade.protobuf.ByteString) ref;
  }
}
/**
 * Sets allocator_type from a Java String.
 *
 * The type of GPU allocation strategy to use.
 * Allowed values:
 * "": The empty string (default) uses a system-chosen default
 * which may change over time.
 * "BFC": A "Best-fit with coalescing" algorithm, simplified from a
 * version of dlmalloc.
 *
 * string allocator_type = 2;
 * @param value The allocatorType to set.
 * @return This builder for chaining.
 * @throws NullPointerException if value is null (proto3 strings are non-null).
 */
public Builder setAllocatorType(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
allocatorType_ = value;
onChanged();
return this;
}
/**
 * Resets allocator_type to its default value (the empty string).
 *
 * The type of GPU allocation strategy to use.
 * Allowed values:
 * "": The empty string (default) uses a system-chosen default
 * which may change over time.
 * "BFC": A "Best-fit with coalescing" algorithm, simplified from a
 * version of dlmalloc.
 *
 * string allocator_type = 2;
 * @return This builder for chaining.
 */
public Builder clearAllocatorType() {
allocatorType_ = getDefaultInstance().getAllocatorType();
onChanged();
return this;
}
/**
 * Sets allocator_type from a ByteString, validating that it is well-formed
 * UTF-8 before storing it.
 *
 * The type of GPU allocation strategy to use.
 * Allowed values:
 * "": The empty string (default) uses a system-chosen default
 * which may change over time.
 * "BFC": A "Best-fit with coalescing" algorithm, simplified from a
 * version of dlmalloc.
 *
 * string allocator_type = 2;
 * @param value The bytes for allocatorType to set.
 * @return This builder for chaining.
 * @throws NullPointerException if value is null.
 */
public Builder setAllocatorTypeBytes(
org.nd4j.shade.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
checkByteStringIsUtf8(value);
allocatorType_ = value;
onChanged();
return this;
}
// Builder-local storage for int64 deferred_deletion_bytes = 3.
private long deferredDeletionBytes_ ;
/**
 * Delay deletion of up to this many bytes to reduce the number of
 * interactions with gpu driver code. If 0, the system chooses
 * a reasonable default (several MBs).
 *
 * int64 deferred_deletion_bytes = 3;
 * @return The deferredDeletionBytes.
 */
@java.lang.Override
public long getDeferredDeletionBytes() {
return deferredDeletionBytes_;
}
/**
 * Sets deferred_deletion_bytes.
 *
 * Delay deletion of up to this many bytes to reduce the number of
 * interactions with gpu driver code. If 0, the system chooses
 * a reasonable default (several MBs).
 *
 * int64 deferred_deletion_bytes = 3;
 * @param value The deferredDeletionBytes to set.
 * @return This builder for chaining.
 */
public Builder setDeferredDeletionBytes(long value) {
deferredDeletionBytes_ = value;
onChanged();
return this;
}
/**
 * Returns visible_device_list as a UTF-8 ByteString.
 *
 * A comma-separated list of GPU ids that determines the 'visible'
 * to 'virtual' mapping of GPU devices. For example, if TensorFlow
 * can see 8 GPU devices in the process, and one wanted to map
 * visible GPU devices 5 and 3 as "/device:GPU:0", and "/device:GPU:1",
 * then one would specify this field as "5,3". This field is similar in
 * spirit to the CUDA_VISIBLE_DEVICES environment variable, except
 * it applies to the visible GPU devices in the process.
 * NOTE:
 * 1. The GPU driver provides the process with the visible GPUs
 * in an order which is not guaranteed to have any correlation to
 * the *physical* GPU id in the machine. This field is used for
 * remapping "visible" to "virtual", which means this operates only
 * after the process starts. Users are required to use vendor
 * specific mechanisms (e.g., CUDA_VISIBLE_DEVICES) to control the
 * physical to visible device mapping prior to invoking TensorFlow.
 * 2. In the code, the ids in this list are also called "CUDA GPU id"s,
 * and the 'virtual' ids of GPU devices (i.e. the ids in the device
 * name "/device:GPU:<id>") are also called "TF GPU id"s. Please
 * refer to third_party/tensorflow/core/common_runtime/gpu/gpu_id.h
 * for more information.
 *
 * string visible_device_list = 5;
 * @return The bytes for visibleDeviceList.
 */
public org.nd4j.shade.protobuf.ByteString
getVisibleDeviceListBytes() {
java.lang.Object ref = visibleDeviceList_;
if (ref instanceof String) {
org.nd4j.shade.protobuf.ByteString b =
org.nd4j.shade.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
// Cache the encoded form so repeated calls skip re-encoding the String.
visibleDeviceList_ = b;
return b;
} else {
return (org.nd4j.shade.protobuf.ByteString) ref;
}
}
/**
 * Sets visible_device_list from a Java String.
 *
 * A comma-separated list of GPU ids that determines the 'visible'
 * to 'virtual' mapping of GPU devices. For example, if TensorFlow
 * can see 8 GPU devices in the process, and one wanted to map
 * visible GPU devices 5 and 3 as "/device:GPU:0", and "/device:GPU:1",
 * then one would specify this field as "5,3". This field is similar in
 * spirit to the CUDA_VISIBLE_DEVICES environment variable, except
 * it applies to the visible GPU devices in the process.
 * NOTE:
 * 1. The GPU driver provides the process with the visible GPUs
 * in an order which is not guaranteed to have any correlation to
 * the *physical* GPU id in the machine. This field is used for
 * remapping "visible" to "virtual", which means this operates only
 * after the process starts. Users are required to use vendor
 * specific mechanisms (e.g., CUDA_VISIBLE_DEVICES) to control the
 * physical to visible device mapping prior to invoking TensorFlow.
 * 2. In the code, the ids in this list are also called "CUDA GPU id"s,
 * and the 'virtual' ids of GPU devices (i.e. the ids in the device
 * name "/device:GPU:<id>") are also called "TF GPU id"s. Please
 * refer to third_party/tensorflow/core/common_runtime/gpu/gpu_id.h
 * for more information.
 *
 * string visible_device_list = 5;
 * @param value The visibleDeviceList to set.
 * @return This builder for chaining.
 * @throws NullPointerException if value is null (proto3 strings are non-null).
 */
public Builder setVisibleDeviceList(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
visibleDeviceList_ = value;
onChanged();
return this;
}
/**
 * Resets visible_device_list to its default value (the empty string,
 * meaning all visible GPUs are mapped in driver order).
 *
 * A comma-separated list of GPU ids that determines the 'visible'
 * to 'virtual' mapping of GPU devices; see setVisibleDeviceList for the
 * full field description.
 *
 * string visible_device_list = 5;
 * @return This builder for chaining.
 */
public Builder clearVisibleDeviceList() {
visibleDeviceList_ = getDefaultInstance().getVisibleDeviceList();
onChanged();
return this;
}
/**
 * Sets visible_device_list from a ByteString, validating that it is
 * well-formed UTF-8 before storing it.
 *
 * A comma-separated list of GPU ids that determines the 'visible'
 * to 'virtual' mapping of GPU devices; see setVisibleDeviceList for the
 * full field description.
 *
 * string visible_device_list = 5;
 * @param value The bytes for visibleDeviceList to set.
 * @return This builder for chaining.
 * @throws NullPointerException if value is null.
 */
public Builder setVisibleDeviceListBytes(
org.nd4j.shade.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
checkByteStringIsUtf8(value);
visibleDeviceList_ = value;
onChanged();
return this;
}
// Builder-local storage for int32 polling_active_delay_usecs = 6.
private int pollingActiveDelayUsecs_ ;
/**
 * In the event polling loop sleep this many microseconds between
 * PollEvents calls, when the queue is not empty. If value is not
 * set or set to 0, gets set to a non-zero default.
 *
 * int32 polling_active_delay_usecs = 6;
 * @return The pollingActiveDelayUsecs.
 */
@java.lang.Override
public int getPollingActiveDelayUsecs() {
return pollingActiveDelayUsecs_;
}
/**
 * Sets polling_active_delay_usecs.
 *
 * In the event polling loop sleep this many microseconds between
 * PollEvents calls, when the queue is not empty. If value is not
 * set or set to 0, gets set to a non-zero default.
 *
 * int32 polling_active_delay_usecs = 6;
 * @param value The pollingActiveDelayUsecs to set.
 * @return This builder for chaining.
 */
public Builder setPollingActiveDelayUsecs(int value) {
pollingActiveDelayUsecs_ = value;
onChanged();
return this;
}
/**
 * Resets polling_active_delay_usecs to 0 (runtime then picks a non-zero
 * default).
 *
 * int32 polling_active_delay_usecs = 6;
 * @return This builder for chaining.
 */
public Builder clearPollingActiveDelayUsecs() {
pollingActiveDelayUsecs_ = 0;
onChanged();
return this;
}
// Builder-local storage for int32 polling_inactive_delay_msecs = 7.
private int pollingInactiveDelayMsecs_ ;
/**
 * This field is deprecated and ignored.
 *
 * int32 polling_inactive_delay_msecs = 7;
 * @return The pollingInactiveDelayMsecs.
 */
@java.lang.Override
public int getPollingInactiveDelayMsecs() {
return pollingInactiveDelayMsecs_;
}
/**
 * Sets polling_inactive_delay_msecs.
 *
 * This field is deprecated and ignored.
 *
 * int32 polling_inactive_delay_msecs = 7;
 * @param value The pollingInactiveDelayMsecs to set.
 * @return This builder for chaining.
 */
public Builder setPollingInactiveDelayMsecs(int value) {
pollingInactiveDelayMsecs_ = value;
onChanged();
return this;
}
/**
 * Resets polling_inactive_delay_msecs to 0.
 *
 * This field is deprecated and ignored.
 *
 * int32 polling_inactive_delay_msecs = 7;
 * @return This builder for chaining.
 */
public Builder clearPollingInactiveDelayMsecs() {
pollingInactiveDelayMsecs_ = 0;
onChanged();
return this;
}
// Builder-local storage for bool force_gpu_compatible = 8.
private boolean forceGpuCompatible_ ;
/**
 * Sets force_gpu_compatible.
 *
 * Force all tensors to be gpu_compatible. On a GPU-enabled TensorFlow,
 * enabling this option forces all CPU tensors to be allocated with Cuda
 * pinned memory. Normally, TensorFlow will infer which tensors should be
 * allocated as the pinned memory. But in case where the inference is
 * incomplete, this option can significantly speed up the cross-device memory
 * copy performance as long as it fits the memory.
 * Note that this option is not something that should be
 * enabled by default for unknown or very large models, since all Cuda pinned
 * memory is unpageable, having too much pinned memory might negatively impact
 * the overall host system performance.
 *
 * bool force_gpu_compatible = 8;
 * @param value The forceGpuCompatible to set.
 * @return This builder for chaining.
 */
public Builder setForceGpuCompatible(boolean value) {
forceGpuCompatible_ = value;
onChanged();
return this;
}
/**
 * Returns whether the experimental sub-message has been set on this builder.
 *
 * Everything inside experimental is subject to change and is not subject
 * to API stability guarantees in
 * https://www.tensorflow.org/guide/version_compat.
 *
 * .tensorflow.GPUOptions.Experimental experimental = 9;
 * @return Whether the experimental field is set.
 */
public boolean hasExperimental() {
// Set either directly on this builder (experimental_) or via the nested
// field builder (experimentalBuilder_), whichever path the caller used.
return experimentalBuilder_ != null || experimental_ != null;
}
/**
 * Sets the experimental sub-message.
 *
 * Everything inside experimental is subject to change and is not subject
 * to API stability guarantees in
 * https://www.tensorflow.org/guide/version_compat.
 *
 * .tensorflow.GPUOptions.Experimental experimental = 9;
 * @throws NullPointerException if value is null.
 */
public Builder setExperimental(org.tensorflow.framework.GPUOptions.Experimental value) {
if (experimentalBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
experimental_ = value;
onChanged();
} else {
// Once a field builder exists it owns the value; delegate to it.
experimentalBuilder_.setMessage(value);
}
return this;
}
/**
 * Lazily creates and returns the nested field builder for experimental.
 * After creation, the field builder owns the current value and the local
 * experimental_ reference is cleared.
 *
 * Everything inside experimental is subject to change and is not subject
 * to API stability guarantees in
 * https://www.tensorflow.org/guide/version_compat.
 *
 * .tensorflow.GPUOptions.Experimental experimental = 9;
 */
private org.nd4j.shade.protobuf.SingleFieldBuilderV3<
org.tensorflow.framework.GPUOptions.Experimental, org.tensorflow.framework.GPUOptions.Experimental.Builder, org.tensorflow.framework.GPUOptions.ExperimentalOrBuilder>
getExperimentalFieldBuilder() {
if (experimentalBuilder_ == null) {
experimentalBuilder_ = new org.nd4j.shade.protobuf.SingleFieldBuilderV3<
org.tensorflow.framework.GPUOptions.Experimental, org.tensorflow.framework.GPUOptions.Experimental.Builder, org.tensorflow.framework.GPUOptions.ExperimentalOrBuilder>(
getExperimental(),
getParentForChildren(),
isClean());
experimental_ = null;
}
return experimentalBuilder_;
}
// Replaces this builder's unknown-field set; delegates to the base class.
@java.lang.Override
public final Builder setUnknownFields(
final org.nd4j.shade.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
// Merges the given unknown fields into this builder's; delegates to the base class.
@java.lang.Override
public final Builder mergeUnknownFields(
final org.nd4j.shade.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:tensorflow.GPUOptions)
}
// @@protoc_insertion_point(class_scope:tensorflow.GPUOptions)
// Shared immutable default instance; all unset message-typed fields
// reference it, so identity comparison against it is safe.
private static final org.tensorflow.framework.GPUOptions DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.tensorflow.framework.GPUOptions();
}
/** Returns the singleton default (all fields unset) GPUOptions message. */
public static org.tensorflow.framework.GPUOptions getDefaultInstance() {
return DEFAULT_INSTANCE;
}
// Singleton parser for GPUOptions. Typed as Parser<GPUOptions> (the raw
// Parser/AbstractParser types here were a defect: they disable the generic
// type check and force unchecked casts at every use site; standard protoc
// output parameterizes both).
private static final org.nd4j.shade.protobuf.Parser<GPUOptions>
    PARSER = new org.nd4j.shade.protobuf.AbstractParser<GPUOptions>() {
  @java.lang.Override
  public GPUOptions parsePartialFrom(
      org.nd4j.shade.protobuf.CodedInputStream input,
      org.nd4j.shade.protobuf.ExtensionRegistryLite extensionRegistry)
      throws org.nd4j.shade.protobuf.InvalidProtocolBufferException {
    // Delegate to the parsing constructor, which reads fields tag-by-tag.
    return new GPUOptions(input, extensionRegistry);
  }
};
/**
 * Returns the shared parser for GPUOptions messages.
 * Return type parameterized as Parser&lt;GPUOptions&gt; (was a raw Parser,
 * which forced unchecked casts on callers); raw-type callers remain
 * source-compatible.
 */
public static org.nd4j.shade.protobuf.Parser<GPUOptions> parser() {
  return PARSER;
}
/**
 * Returns the parser for this message type.
 * Covariantly overrides GeneratedMessageV3's getParserForType with the
 * concrete Parser&lt;GPUOptions&gt; type (was a raw Parser, losing the
 * generic type information); binary- and source-compatible for callers.
 */
@java.lang.Override
public org.nd4j.shade.protobuf.Parser<GPUOptions> getParserForType() {
  return PARSER;
}
// Returns the shared default instance for this message type (required by
// the MessageLite contract).
@java.lang.Override
public org.tensorflow.framework.GPUOptions getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}