// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: tensorflow/core/protobuf/config.proto
package org.tensorflow.framework;
/**
* Protobuf type {@code tensorflow.GPUOptions}
*/
public final class GPUOptions extends
org.nd4j.shade.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:tensorflow.GPUOptions)
GPUOptionsOrBuilder {
private static final long serialVersionUID = 0L;
// Use GPUOptions.newBuilder() to construct.
// Internal builder-based constructor; use GPUOptions.newBuilder() to construct.
// FIX: restore the wildcard type argument ("Builder<?>") that was stripped from
// the dump ("Builder>" is a syntax error).
private GPUOptions(org.nd4j.shade.protobuf.GeneratedMessageV3.Builder<?> builder) {
  super(builder);
}
// No-arg constructor: initializes every singular field to its proto3 default
// (0 / false / empty string). Used when parsing builds a fresh instance.
private GPUOptions() {
  perProcessGpuMemoryFraction_ = 0D;
  allowGrowth_ = false;
  allocatorType_ = "";
  deferredDeletionBytes_ = 0L;
  visibleDeviceList_ = "";
  pollingActiveDelayUsecs_ = 0;
  pollingInactiveDelayMsecs_ = 0;
  forceGpuCompatible_ = false;
}
@java.lang.Override
public final org.nd4j.shade.protobuf.UnknownFieldSet
getUnknownFields() {
  // Fields present on the wire but unknown to this generated class are
  // preserved here so they round-trip through re-serialization.
  return this.unknownFields;
}
// Wire-format parsing constructor. Reads tag/value pairs from the stream until
// tag 0 (end of input); unrecognized tags are accumulated into unknownFields.
// Each case value is the raw tag: (field_number << 3) | wire_type.
private GPUOptions(
    org.nd4j.shade.protobuf.CodedInputStream input,
    org.nd4j.shade.protobuf.ExtensionRegistryLite extensionRegistry)
    throws org.nd4j.shade.protobuf.InvalidProtocolBufferException {
  this();
  if (extensionRegistry == null) {
    throw new java.lang.NullPointerException();
  }
  // Generator artifact: unused for this message (no repeated fields here).
  int mutable_bitField0_ = 0;
  org.nd4j.shade.protobuf.UnknownFieldSet.Builder unknownFields =
      org.nd4j.shade.protobuf.UnknownFieldSet.newBuilder();
  try {
    boolean done = false;
    while (!done) {
      int tag = input.readTag();
      switch (tag) {
        case 0:
          // Tag 0 signals end of message/stream.
          done = true;
          break;
        default: {
          // Unknown tag: preserve it; stop if the unknown-field parser
          // reports end of group/stream.
          if (!parseUnknownFieldProto3(
              input, unknownFields, extensionRegistry, tag)) {
            done = true;
          }
          break;
        }
        case 9: {
          // Field 1, fixed64: double per_process_gpu_memory_fraction.
          perProcessGpuMemoryFraction_ = input.readDouble();
          break;
        }
        case 18: {
          // Field 2, length-delimited: string allocator_type (must be UTF-8).
          java.lang.String s = input.readStringRequireUtf8();
          allocatorType_ = s;
          break;
        }
        case 24: {
          // Field 3, varint: int64 deferred_deletion_bytes.
          deferredDeletionBytes_ = input.readInt64();
          break;
        }
        case 32: {
          // Field 4, varint: bool allow_growth.
          allowGrowth_ = input.readBool();
          break;
        }
        case 42: {
          // Field 5, length-delimited: string visible_device_list.
          java.lang.String s = input.readStringRequireUtf8();
          visibleDeviceList_ = s;
          break;
        }
        case 48: {
          // Field 6, varint: int32 polling_active_delay_usecs.
          pollingActiveDelayUsecs_ = input.readInt32();
          break;
        }
        case 56: {
          // Field 7, varint: int32 polling_inactive_delay_msecs.
          pollingInactiveDelayMsecs_ = input.readInt32();
          break;
        }
        case 64: {
          // Field 8, varint: bool force_gpu_compatible.
          forceGpuCompatible_ = input.readBool();
          break;
        }
        case 74: {
          // Field 9, length-delimited: message experimental. If the field was
          // already seen, merge the new payload into the existing value
          // (standard proto semantics for repeated occurrences of a
          // singular message field).
          org.tensorflow.framework.GPUOptions.Experimental.Builder subBuilder = null;
          if (experimental_ != null) {
            subBuilder = experimental_.toBuilder();
          }
          experimental_ = input.readMessage(org.tensorflow.framework.GPUOptions.Experimental.parser(), extensionRegistry);
          if (subBuilder != null) {
            subBuilder.mergeFrom(experimental_);
            experimental_ = subBuilder.buildPartial();
          }
          break;
        }
      }
    }
  } catch (org.nd4j.shade.protobuf.InvalidProtocolBufferException e) {
    // Attach the partially-parsed message so callers can inspect it.
    throw e.setUnfinishedMessage(this);
  } catch (java.io.IOException e) {
    throw new org.nd4j.shade.protobuf.InvalidProtocolBufferException(
        e).setUnfinishedMessage(this);
  } finally {
    // Always seal unknown fields, even when parsing failed part-way.
    this.unknownFields = unknownFields.build();
    makeExtensionsImmutable();
  }
}
// Returns the descriptor for tensorflow.GPUOptions generated from
// tensorflow/core/protobuf/config.proto.
public static final org.nd4j.shade.protobuf.Descriptors.Descriptor
getDescriptor() {
  return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_GPUOptions_descriptor;
}
// Wires proto field numbers to the reflective accessors of this class and
// its Builder.
protected org.nd4j.shade.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
  return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_GPUOptions_fieldAccessorTable
      .ensureFieldAccessorsInitialized(
          org.tensorflow.framework.GPUOptions.class, org.tensorflow.framework.GPUOptions.Builder.class);
}
public interface ExperimentalOrBuilder extends
// @@protoc_insertion_point(interface_extends:tensorflow.GPUOptions.Experimental)
org.nd4j.shade.protobuf.MessageOrBuilder {
/**
*
* The multi virtual device settings. If empty (not set), it will create
* single virtual device on each visible GPU, according to the settings
* in "visible_device_list" above. Otherwise, the number of elements in the
* list must be the same as the number of visible GPUs (after
* "visible_device_list" filtering if it is set), and the string represented
* device names (e.g. /device:GPU:<id>) will refer to the virtual
* devices and have the <id> field assigned sequentially starting from 0,
* according to the order they appear in this list and the "memory_limit"
* list inside each element. For example,
* visible_device_list = "1,0"
* virtual_devices { memory_limit: 1GB memory_limit: 2GB }
* virtual_devices {}
* will create three virtual devices as:
* /device:GPU:0 -> visible GPU 1 with 1GB memory
* /device:GPU:1 -> visible GPU 1 with 2GB memory
* /device:GPU:2 -> visible GPU 0 with all available memory
* NOTE:
* 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
* at the same time.
* 2. Currently this setting is per-process, not per-session. Using
* different settings in different sessions within same process will
* result in undefined behavior.
*
* The multi virtual device settings. If empty (not set), it will create
* single virtual device on each visible GPU, according to the settings
* in "visible_device_list" above. Otherwise, the number of elements in the
* list must be the same as the number of visible GPUs (after
* "visible_device_list" filtering if it is set), and the string represented
* device names (e.g. /device:GPU:<id>) will refer to the virtual
* devices and have the <id> field assigned sequentially starting from 0,
* according to the order they appear in this list and the "memory_limit"
* list inside each element. For example,
* visible_device_list = "1,0"
* virtual_devices { memory_limit: 1GB memory_limit: 2GB }
* virtual_devices {}
* will create three virtual devices as:
* /device:GPU:0 -> visible GPU 1 with 1GB memory
* /device:GPU:1 -> visible GPU 1 with 2GB memory
* /device:GPU:2 -> visible GPU 0 with all available memory
* NOTE:
* 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
* at the same time.
* 2. Currently this setting is per-process, not per-session. Using
* different settings in different sessions within same process will
* result in undefined behavior.
*
* The multi virtual device settings. If empty (not set), it will create
* single virtual device on each visible GPU, according to the settings
* in "visible_device_list" above. Otherwise, the number of elements in the
* list must be the same as the number of visible GPUs (after
* "visible_device_list" filtering if it is set), and the string represented
* device names (e.g. /device:GPU:<id>) will refer to the virtual
* devices and have the <id> field assigned sequentially starting from 0,
* according to the order they appear in this list and the "memory_limit"
* list inside each element. For example,
* visible_device_list = "1,0"
* virtual_devices { memory_limit: 1GB memory_limit: 2GB }
* virtual_devices {}
* will create three virtual devices as:
* /device:GPU:0 -> visible GPU 1 with 1GB memory
* /device:GPU:1 -> visible GPU 1 with 2GB memory
* /device:GPU:2 -> visible GPU 0 with all available memory
* NOTE:
* 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
* at the same time.
* 2. Currently this setting is per-process, not per-session. Using
* different settings in different sessions within same process will
* result in undefined behavior.
*
* The multi virtual device settings. If empty (not set), it will create
* single virtual device on each visible GPU, according to the settings
* in "visible_device_list" above. Otherwise, the number of elements in the
* list must be the same as the number of visible GPUs (after
* "visible_device_list" filtering if it is set), and the string represented
* device names (e.g. /device:GPU:<id>) will refer to the virtual
* devices and have the <id> field assigned sequentially starting from 0,
* according to the order they appear in this list and the "memory_limit"
* list inside each element. For example,
* visible_device_list = "1,0"
* virtual_devices { memory_limit: 1GB memory_limit: 2GB }
* virtual_devices {}
* will create three virtual devices as:
* /device:GPU:0 -> visible GPU 1 with 1GB memory
* /device:GPU:1 -> visible GPU 1 with 2GB memory
* /device:GPU:2 -> visible GPU 0 with all available memory
* NOTE:
* 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
* at the same time.
* 2. Currently this setting is per-process, not per-session. Using
* different settings in different sessions within same process will
* result in undefined behavior.
*
* The multi virtual device settings. If empty (not set), it will create
* single virtual device on each visible GPU, according to the settings
* in "visible_device_list" above. Otherwise, the number of elements in the
* list must be the same as the number of visible GPUs (after
* "visible_device_list" filtering if it is set), and the string represented
* device names (e.g. /device:GPU:<id>) will refer to the virtual
* devices and have the <id> field assigned sequentially starting from 0,
* according to the order they appear in this list and the "memory_limit"
* list inside each element. For example,
* visible_device_list = "1,0"
* virtual_devices { memory_limit: 1GB memory_limit: 2GB }
* virtual_devices {}
* will create three virtual devices as:
* /device:GPU:0 -> visible GPU 1 with 1GB memory
* /device:GPU:1 -> visible GPU 1 with 2GB memory
* /device:GPU:2 -> visible GPU 0 with all available memory
* NOTE:
* 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
* at the same time.
* 2. Currently this setting is per-process, not per-session. Using
* different settings in different sessions within same process will
* result in undefined behavior.
*
* If true, uses CUDA unified memory for memory allocations. If
* per_process_gpu_memory_fraction option is greater than 1.0, then unified
* memory is used regardless of the value for this field. See comments for
* per_process_gpu_memory_fraction field for more details and requirements
* of the unified memory. This option is useful to oversubscribe memory if
* multiple processes are sharing a single GPU while individually using less
* than 1.0 per process memory fraction.
*
*
* bool use_unified_memory = 2;
*/
boolean getUseUnifiedMemory();
}
/**
* Protobuf type {@code tensorflow.GPUOptions.Experimental}
*/
public static final class Experimental extends
org.nd4j.shade.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:tensorflow.GPUOptions.Experimental)
ExperimentalOrBuilder {
private static final long serialVersionUID = 0L;
// Use Experimental.newBuilder() to construct.
// Internal builder-based constructor; use Experimental.newBuilder() to construct.
// FIX: restore the wildcard type argument ("Builder<?>") that was stripped from
// the dump ("Builder>" is a syntax error).
private Experimental(org.nd4j.shade.protobuf.GeneratedMessageV3.Builder<?> builder) {
  super(builder);
}
// No-arg constructor: proto3 defaults (empty repeated list, false bool).
private Experimental() {
  virtualDevices_ = java.util.Collections.emptyList();
  useUnifiedMemory_ = false;
}
@java.lang.Override
public final org.nd4j.shade.protobuf.UnknownFieldSet
getUnknownFields() {
  // Unknown wire fields are preserved so they round-trip unchanged.
  return this.unknownFields;
}
// Wire-format parsing constructor for GPUOptions.Experimental. Case values are
// raw tags: (field_number << 3) | wire_type.
private Experimental(
    org.nd4j.shade.protobuf.CodedInputStream input,
    org.nd4j.shade.protobuf.ExtensionRegistryLite extensionRegistry)
    throws org.nd4j.shade.protobuf.InvalidProtocolBufferException {
  this();
  if (extensionRegistry == null) {
    throw new java.lang.NullPointerException();
  }
  // Bit 0 tracks whether virtualDevices_ has been made mutable.
  int mutable_bitField0_ = 0;
  org.nd4j.shade.protobuf.UnknownFieldSet.Builder unknownFields =
      org.nd4j.shade.protobuf.UnknownFieldSet.newBuilder();
  try {
    boolean done = false;
    while (!done) {
      int tag = input.readTag();
      switch (tag) {
        case 0:
          // End of input.
          done = true;
          break;
        default: {
          if (!parseUnknownFieldProto3(
              input, unknownFields, extensionRegistry, tag)) {
            done = true;
          }
          break;
        }
        case 10: {
          // Field 1, length-delimited: repeated VirtualDevices virtual_devices.
          if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
            // FIX: restore the element type argument that was stripped from
            // the dump (raw ArrayList).
            virtualDevices_ =
                new java.util.ArrayList<org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices>();
            mutable_bitField0_ |= 0x00000001;
          }
          virtualDevices_.add(
              input.readMessage(org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices.parser(), extensionRegistry));
          break;
        }
        case 16: {
          // Field 2, varint: bool use_unified_memory.
          useUnifiedMemory_ = input.readBool();
          break;
        }
      }
    }
  } catch (org.nd4j.shade.protobuf.InvalidProtocolBufferException e) {
    throw e.setUnfinishedMessage(this);
  } catch (java.io.IOException e) {
    throw new org.nd4j.shade.protobuf.InvalidProtocolBufferException(
        e).setUnfinishedMessage(this);
  } finally {
    // Seal the repeated field and unknown fields, even on parse failure.
    if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
      virtualDevices_ = java.util.Collections.unmodifiableList(virtualDevices_);
    }
    this.unknownFields = unknownFields.build();
    makeExtensionsImmutable();
  }
}
// Descriptor for the nested type tensorflow.GPUOptions.Experimental.
public static final org.nd4j.shade.protobuf.Descriptors.Descriptor
getDescriptor() {
  return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_GPUOptions_Experimental_descriptor;
}
// Reflective field accessors for Experimental and its Builder.
protected org.nd4j.shade.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
  return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_GPUOptions_Experimental_fieldAccessorTable
      .ensureFieldAccessorsInitialized(
          org.tensorflow.framework.GPUOptions.Experimental.class, org.tensorflow.framework.GPUOptions.Experimental.Builder.class);
}
public interface VirtualDevicesOrBuilder extends
// @@protoc_insertion_point(interface_extends:tensorflow.GPUOptions.Experimental.VirtualDevices)
org.nd4j.shade.protobuf.MessageOrBuilder {
/**
*
* Per "virtual" device memory limit, in MB. The number of elements in
* the list is the number of virtual devices to create on the
* corresponding visible GPU (see "virtual_devices" below).
* If empty, it will create single virtual device taking all available
* memory from the device.
* For the concept of "visible" and "virtual" GPU, see the comments for
* "visible_device_list" above for more information.
*
* Per "virtual" device memory limit, in MB. The number of elements in
* the list is the number of virtual devices to create on the
* corresponding visible GPU (see "virtual_devices" below).
* If empty, it will create single virtual device taking all available
* memory from the device.
* For the concept of "visible" and "virtual" GPU, see the comments for
* "visible_device_list" above for more information.
*
* Per "virtual" device memory limit, in MB. The number of elements in
* the list is the number of virtual devices to create on the
* corresponding visible GPU (see "virtual_devices" below).
* If empty, it will create single virtual device taking all available
* memory from the device.
* For the concept of "visible" and "virtual" GPU, see the comments for
* "visible_device_list" above for more information.
*
* Configuration for breaking down a visible GPU into multiple "virtual"
* devices.
*
*
* Protobuf type {@code tensorflow.GPUOptions.Experimental.VirtualDevices}
*/
public static final class VirtualDevices extends
org.nd4j.shade.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:tensorflow.GPUOptions.Experimental.VirtualDevices)
VirtualDevicesOrBuilder {
private static final long serialVersionUID = 0L;
// Use VirtualDevices.newBuilder() to construct.
// Internal builder-based constructor; use VirtualDevices.newBuilder() to construct.
// FIX: restore the wildcard type argument ("Builder<?>") that was stripped from
// the dump ("Builder>" is a syntax error).
private VirtualDevices(org.nd4j.shade.protobuf.GeneratedMessageV3.Builder<?> builder) {
  super(builder);
}
// No-arg constructor: the repeated float field defaults to an empty list.
private VirtualDevices() {
  memoryLimitMb_ = java.util.Collections.emptyList();
}
@java.lang.Override
public final org.nd4j.shade.protobuf.UnknownFieldSet
getUnknownFields() {
  // Unknown wire fields are preserved so they round-trip unchanged.
  return this.unknownFields;
}
// Wire-format parsing constructor for VirtualDevices. The single repeated
// float field (memory_limit_mb = 1) can arrive either unpacked (tag 13,
// one fixed32 per element) or packed (tag 10, a length-delimited run of
// fixed32 values); both encodings are accepted per proto3 rules.
private VirtualDevices(
    org.nd4j.shade.protobuf.CodedInputStream input,
    org.nd4j.shade.protobuf.ExtensionRegistryLite extensionRegistry)
    throws org.nd4j.shade.protobuf.InvalidProtocolBufferException {
  this();
  if (extensionRegistry == null) {
    throw new java.lang.NullPointerException();
  }
  // Bit 0 tracks whether memoryLimitMb_ has been made mutable.
  int mutable_bitField0_ = 0;
  org.nd4j.shade.protobuf.UnknownFieldSet.Builder unknownFields =
      org.nd4j.shade.protobuf.UnknownFieldSet.newBuilder();
  try {
    boolean done = false;
    while (!done) {
      int tag = input.readTag();
      switch (tag) {
        case 0:
          // End of input.
          done = true;
          break;
        default: {
          if (!parseUnknownFieldProto3(
              input, unknownFields, extensionRegistry, tag)) {
            done = true;
          }
          break;
        }
        case 13: {
          // Field 1, fixed32: one unpacked float element.
          if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
            // FIX: restore the element type argument that was stripped from
            // the dump (raw ArrayList).
            memoryLimitMb_ = new java.util.ArrayList<java.lang.Float>();
            mutable_bitField0_ |= 0x00000001;
          }
          memoryLimitMb_.add(input.readFloat());
          break;
        }
        case 10: {
          // Field 1, length-delimited: packed run of floats. Push a read
          // limit so we stop exactly at the end of the packed payload.
          int length = input.readRawVarint32();
          int limit = input.pushLimit(length);
          if (!((mutable_bitField0_ & 0x00000001) == 0x00000001) && input.getBytesUntilLimit() > 0) {
            // FIX: restore the element type argument (raw ArrayList in dump).
            memoryLimitMb_ = new java.util.ArrayList<java.lang.Float>();
            mutable_bitField0_ |= 0x00000001;
          }
          while (input.getBytesUntilLimit() > 0) {
            memoryLimitMb_.add(input.readFloat());
          }
          input.popLimit(limit);
          break;
        }
      }
    }
  } catch (org.nd4j.shade.protobuf.InvalidProtocolBufferException e) {
    throw e.setUnfinishedMessage(this);
  } catch (java.io.IOException e) {
    throw new org.nd4j.shade.protobuf.InvalidProtocolBufferException(
        e).setUnfinishedMessage(this);
  } finally {
    // Seal the repeated field and unknown fields, even on parse failure.
    if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
      memoryLimitMb_ = java.util.Collections.unmodifiableList(memoryLimitMb_);
    }
    this.unknownFields = unknownFields.build();
    makeExtensionsImmutable();
  }
}
// Descriptor for the nested type tensorflow.GPUOptions.Experimental.VirtualDevices.
public static final org.nd4j.shade.protobuf.Descriptors.Descriptor
getDescriptor() {
  return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_GPUOptions_Experimental_VirtualDevices_descriptor;
}
// Reflective field accessors for VirtualDevices and its Builder.
protected org.nd4j.shade.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
  return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_GPUOptions_Experimental_VirtualDevices_fieldAccessorTable
      .ensureFieldAccessorsInitialized(
          org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices.class, org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices.Builder.class);
}
public static final int MEMORY_LIMIT_MB_FIELD_NUMBER = 1;
// Backing store for repeated float memory_limit_mb = 1.
// FIX: restore the element type argument stripped from the dump — with a raw
// List, getMemoryLimitMb(int) ("return memoryLimitMb_.get(index);" into a
// float return type) does not compile.
private java.util.List<java.lang.Float> memoryLimitMb_;
/**
 * <pre>
 * Per "virtual" device memory limit, in MB. The number of elements in
 * the list is the number of virtual devices to create on the
 * corresponding visible GPU (see "virtual_devices" below).
 * If empty, it will create single virtual device taking all available
 * memory from the device.
 * For the concept of "visible" and "virtual" GPU, see the comments for
 * "visible_device_list" above for more information.
 * </pre>
 *
 * <code>repeated float memory_limit_mb = 1;</code>
 */
// FIX: restore getMemoryLimitMbList(), which was lost from the dump (it left
// a duplicated javadoc block behind) but is called by writeTo(),
// getSerializedSize(), equals() and hashCode() in this class.
public java.util.List<java.lang.Float>
    getMemoryLimitMbList() {
  return memoryLimitMb_;
}
/**
 * <code>repeated float memory_limit_mb = 1;</code>
 */
public int getMemoryLimitMbCount() {
  return memoryLimitMb_.size();
}
/**
 * <pre>
 * Per "virtual" device memory limit, in MB. The number of elements in
 * the list is the number of virtual devices to create on the
 * corresponding visible GPU (see "virtual_devices" below).
 * If empty, it will create single virtual device taking all available
 * memory from the device.
 * For the concept of "visible" and "virtual" GPU, see the comments for
 * "visible_device_list" above for more information.
 * </pre>
 *
 * <code>repeated float memory_limit_mb = 1;</code>
 *
 * @param index zero-based position within the repeated field
 * @return the element at {@code index} (unboxed)
 */
public float getMemoryLimitMb(int index) {
  return memoryLimitMb_.get(index);
}
// Cached byte size of the packed memory_limit_mb payload, computed by
// getSerializedSize() and reused by writeTo(); -1 means "not computed yet".
private int memoryLimitMbMemoizedSerializedSize = -1;
// Memoized isInitialized result: -1 unknown, 0 false, 1 true.
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
  byte isInitialized = memoizedIsInitialized;
  if (isInitialized == 1) return true;
  if (isInitialized == 0) return false;
  // Proto3 message with no required fields: always initialized.
  memoizedIsInitialized = 1;
  return true;
}
// Serializes this message in packed encoding: tag 10 (field 1,
// length-delimited), the payload byte length, then the raw floats.
public void writeTo(org.nd4j.shade.protobuf.CodedOutputStream output)
    throws java.io.IOException {
  // Side effect: populates memoryLimitMbMemoizedSerializedSize below.
  getSerializedSize();
  if (getMemoryLimitMbList().size() > 0) {
    output.writeUInt32NoTag(10);
    output.writeUInt32NoTag(memoryLimitMbMemoizedSerializedSize);
  }
  for (int i = 0; i < memoryLimitMb_.size(); i++) {
    output.writeFloatNoTag(memoryLimitMb_.get(i));
  }
  unknownFields.writeTo(output);
}
// Computes (and memoizes) the serialized byte size of this message.
public int getSerializedSize() {
  int size = memoizedSize;
  if (size != -1) return size;
  size = 0;
  {
    // Packed floats: 4 bytes per element, plus 1 tag byte and the varint
    // length prefix when the list is non-empty.
    int dataSize = 0;
    dataSize = 4 * getMemoryLimitMbList().size();
    size += dataSize;
    if (!getMemoryLimitMbList().isEmpty()) {
      size += 1;
      size += org.nd4j.shade.protobuf.CodedOutputStream
          .computeInt32SizeNoTag(dataSize);
    }
    // Cache the payload size for writeTo().
    memoryLimitMbMemoizedSerializedSize = dataSize;
  }
  size += unknownFields.getSerializedSize();
  memoizedSize = size;
  return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
  // Reflexive fast path.
  if (obj == this) {
    return true;
  }
  // Non-VirtualDevices comparisons fall back to the superclass contract.
  if (!(obj instanceof org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices)) {
    return super.equals(obj);
  }
  org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices that =
      (org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices) obj;
  // Messages are equal when the repeated field and the unknown fields match.
  return getMemoryLimitMbList().equals(that.getMemoryLimitMbList())
      && unknownFields.equals(that.unknownFields);
}
@java.lang.Override
public int hashCode() {
  if (memoizedHashCode != 0) {
    return memoizedHashCode;
  }
  // Standard protobuf-generated hash: seeded with the descriptor, then each
  // set field folded in via its field number and value hash. The exact
  // constants (41/19/37/53/29) must not change — serialized hashes and
  // hash-based collections depend on them.
  int hash = 41;
  hash = (19 * hash) + getDescriptor().hashCode();
  if (getMemoryLimitMbCount() > 0) {
    hash = (37 * hash) + MEMORY_LIMIT_MB_FIELD_NUMBER;
    hash = (53 * hash) + getMemoryLimitMbList().hashCode();
  }
  hash = (29 * hash) + unknownFields.hashCode();
  memoizedHashCode = hash;
  return hash;
}
// ---- Static parse entry points. All delegate to PARSER (or the
// GeneratedMessageV3 IOException-translating helpers for stream inputs). ----

// Parse from an in-memory buffer (no extensions).
public static org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices parseFrom(
    java.nio.ByteBuffer data)
    throws org.nd4j.shade.protobuf.InvalidProtocolBufferException {
  return PARSER.parseFrom(data);
}
// Parse from an in-memory buffer with an extension registry.
public static org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices parseFrom(
    java.nio.ByteBuffer data,
    org.nd4j.shade.protobuf.ExtensionRegistryLite extensionRegistry)
    throws org.nd4j.shade.protobuf.InvalidProtocolBufferException {
  return PARSER.parseFrom(data, extensionRegistry);
}
// Parse from a ByteString.
public static org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices parseFrom(
    org.nd4j.shade.protobuf.ByteString data)
    throws org.nd4j.shade.protobuf.InvalidProtocolBufferException {
  return PARSER.parseFrom(data);
}
public static org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices parseFrom(
    org.nd4j.shade.protobuf.ByteString data,
    org.nd4j.shade.protobuf.ExtensionRegistryLite extensionRegistry)
    throws org.nd4j.shade.protobuf.InvalidProtocolBufferException {
  return PARSER.parseFrom(data, extensionRegistry);
}
// Parse from a byte array.
public static org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices parseFrom(byte[] data)
    throws org.nd4j.shade.protobuf.InvalidProtocolBufferException {
  return PARSER.parseFrom(data);
}
public static org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices parseFrom(
    byte[] data,
    org.nd4j.shade.protobuf.ExtensionRegistryLite extensionRegistry)
    throws org.nd4j.shade.protobuf.InvalidProtocolBufferException {
  return PARSER.parseFrom(data, extensionRegistry);
}
// Parse from a stream (entire remaining content, not length-delimited).
public static org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices parseFrom(java.io.InputStream input)
    throws java.io.IOException {
  return org.nd4j.shade.protobuf.GeneratedMessageV3
      .parseWithIOException(PARSER, input);
}
public static org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices parseFrom(
    java.io.InputStream input,
    org.nd4j.shade.protobuf.ExtensionRegistryLite extensionRegistry)
    throws java.io.IOException {
  return org.nd4j.shade.protobuf.GeneratedMessageV3
      .parseWithIOException(PARSER, input, extensionRegistry);
}
// Parse one varint-length-prefixed message from a stream.
public static org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices parseDelimitedFrom(java.io.InputStream input)
    throws java.io.IOException {
  return org.nd4j.shade.protobuf.GeneratedMessageV3
      .parseDelimitedWithIOException(PARSER, input);
}
public static org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices parseDelimitedFrom(
    java.io.InputStream input,
    org.nd4j.shade.protobuf.ExtensionRegistryLite extensionRegistry)
    throws java.io.IOException {
  return org.nd4j.shade.protobuf.GeneratedMessageV3
      .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
// Parse from an existing CodedInputStream.
public static org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices parseFrom(
    org.nd4j.shade.protobuf.CodedInputStream input)
    throws java.io.IOException {
  return org.nd4j.shade.protobuf.GeneratedMessageV3
      .parseWithIOException(PARSER, input);
}
public static org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices parseFrom(
    org.nd4j.shade.protobuf.CodedInputStream input,
    org.nd4j.shade.protobuf.ExtensionRegistryLite extensionRegistry)
    throws java.io.IOException {
  return org.nd4j.shade.protobuf.GeneratedMessageV3
      .parseWithIOException(PARSER, input, extensionRegistry);
}
// ---- Builder factory methods. ----
public Builder newBuilderForType() { return newBuilder(); }
// Fresh builder with all fields at their defaults.
public static Builder newBuilder() {
  return DEFAULT_INSTANCE.toBuilder();
}
// Builder pre-populated from an existing message.
public static Builder newBuilder(org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices prototype) {
  return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
public Builder toBuilder() {
  // The default instance yields an empty builder; anything else is copied in.
  return this == DEFAULT_INSTANCE
      ? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
    org.nd4j.shade.protobuf.GeneratedMessageV3.BuilderParent parent) {
  Builder builder = new Builder(parent);
  return builder;
}
/**
*
* Configuration for breaking down a visible GPU into multiple "virtual"
* devices.
*
*
* Protobuf type {@code tensorflow.GPUOptions.Experimental.VirtualDevices}
*/
public static final class Builder extends
org.nd4j.shade.protobuf.GeneratedMessageV3.Builder implements
// @@protoc_insertion_point(builder_implements:tensorflow.GPUOptions.Experimental.VirtualDevices)
org.tensorflow.framework.GPUOptions.Experimental.VirtualDevicesOrBuilder {
// Descriptor for the message type this Builder constructs.
public static final org.nd4j.shade.protobuf.Descriptors.Descriptor
getDescriptor() {
  return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_GPUOptions_Experimental_VirtualDevices_descriptor;
}
// Reflective field accessors shared with the message class.
protected org.nd4j.shade.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
  return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_GPUOptions_Experimental_VirtualDevices_fieldAccessorTable
      .ensureFieldAccessorsInitialized(
          org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices.class, org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices.Builder.class);
}
// Construct using org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices.newBuilder()
private Builder() {
  maybeForceBuilderInitialization();
}
// Constructor used for nested-builder parent notification.
private Builder(
    org.nd4j.shade.protobuf.GeneratedMessageV3.BuilderParent parent) {
  super(parent);
  maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
  // No nested-message fields here, so there is nothing to pre-create even
  // when the runtime forces eager field builders.
  if (org.nd4j.shade.protobuf.GeneratedMessageV3
      .alwaysUseFieldBuilders) {
  }
}
// Resets the builder to default state: empty list, "has" bit cleared.
public Builder clear() {
  super.clear();
  memoryLimitMb_ = java.util.Collections.emptyList();
  bitField0_ = (bitField0_ & ~0x00000001);
  return this;
}
public org.nd4j.shade.protobuf.Descriptors.Descriptor
getDescriptorForType() {
  return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_GPUOptions_Experimental_VirtualDevices_descriptor;
}
public org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices getDefaultInstanceForType() {
  return org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices.getDefaultInstance();
}
// Builds and verifies the message; proto3 messages with no required fields
// cannot actually fail the isInitialized check.
public org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices build() {
  org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices result = buildPartial();
  if (!result.isInitialized()) {
    throw newUninitializedMessageException(result);
  }
  return result;
}
// Builds without the initialization check. Freezes the repeated list (and
// clears the mutable bit) so the message and any later builder mutations
// cannot alias a mutable list.
public org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices buildPartial() {
  org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices result = new org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices(this);
  // Generator artifact: snapshot of the bit field, unused for this message.
  int from_bitField0_ = bitField0_;
  if (((bitField0_ & 0x00000001) == 0x00000001)) {
    memoryLimitMb_ = java.util.Collections.unmodifiableList(memoryLimitMb_);
    bitField0_ = (bitField0_ & ~0x00000001);
  }
  result.memoryLimitMb_ = memoryLimitMb_;
  onBuilt();
  return result;
}
// ---- Covariant overrides: delegate to GeneratedMessageV3.Builder and
// narrow the return type to this Builder for fluent chaining. ----
public Builder clone() {
  return (Builder) super.clone();
}
public Builder setField(
    org.nd4j.shade.protobuf.Descriptors.FieldDescriptor field,
    java.lang.Object value) {
  return (Builder) super.setField(field, value);
}
public Builder clearField(
    org.nd4j.shade.protobuf.Descriptors.FieldDescriptor field) {
  return (Builder) super.clearField(field);
}
public Builder clearOneof(
    org.nd4j.shade.protobuf.Descriptors.OneofDescriptor oneof) {
  return (Builder) super.clearOneof(oneof);
}
public Builder setRepeatedField(
    org.nd4j.shade.protobuf.Descriptors.FieldDescriptor field,
    int index, java.lang.Object value) {
  return (Builder) super.setRepeatedField(field, index, value);
}
public Builder addRepeatedField(
    org.nd4j.shade.protobuf.Descriptors.FieldDescriptor field,
    java.lang.Object value) {
  return (Builder) super.addRepeatedField(field, value);
}
// Generic merge: dispatches to the typed overload when possible, otherwise
// falls back to reflective merging.
public Builder mergeFrom(org.nd4j.shade.protobuf.Message other) {
  if (other instanceof org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices) {
    return mergeFrom((org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices)other);
  } else {
    super.mergeFrom(other);
    return this;
  }
}
// Typed merge: appends other's repeated elements to ours. When our list is
// still empty, the other message's immutable list is shared directly (and
// the mutable bit cleared) to avoid a copy.
public Builder mergeFrom(org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices other) {
  if (other == org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices.getDefaultInstance()) return this;
  if (!other.memoryLimitMb_.isEmpty()) {
    if (memoryLimitMb_.isEmpty()) {
      memoryLimitMb_ = other.memoryLimitMb_;
      bitField0_ = (bitField0_ & ~0x00000001);
    } else {
      ensureMemoryLimitMbIsMutable();
      memoryLimitMb_.addAll(other.memoryLimitMb_);
    }
    onChanged();
  }
  this.mergeUnknownFields(other.unknownFields);
  onChanged();
  return this;
}
// Proto3 message with no required fields: always initialized.
public final boolean isInitialized() {
  return true;
}
// Stream merge: parses a full message then merges it in. On parse failure,
// the partially-parsed message (if any) is still merged in the finally
// block before the IOException is rethrown.
public Builder mergeFrom(
    org.nd4j.shade.protobuf.CodedInputStream input,
    org.nd4j.shade.protobuf.ExtensionRegistryLite extensionRegistry)
    throws java.io.IOException {
  org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices parsedMessage = null;
  try {
    parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
  } catch (org.nd4j.shade.protobuf.InvalidProtocolBufferException e) {
    parsedMessage = (org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices) e.getUnfinishedMessage();
    throw e.unwrapIOException();
  } finally {
    if (parsedMessage != null) {
      mergeFrom(parsedMessage);
    }
  }
  return this;
}
// Bit 0: set when memoryLimitMb_ is a private mutable copy.
private int bitField0_;
// FIX: restore the element type argument stripped from the dump (raw List /
// raw ArrayList); the builder list holds boxed floats.
private java.util.List<java.lang.Float> memoryLimitMb_ = java.util.Collections.emptyList();
// Copy-on-write: replace the (possibly shared/immutable) list with a private
// mutable copy before the first mutation.
private void ensureMemoryLimitMbIsMutable() {
  if (!((bitField0_ & 0x00000001) == 0x00000001)) {
    memoryLimitMb_ = new java.util.ArrayList<java.lang.Float>(memoryLimitMb_);
    bitField0_ |= 0x00000001;
  }
}
/**
 * <pre>
 * Per "virtual" device memory limit, in MB. The number of elements in
 * the list is the number of virtual devices to create on the
 * corresponding visible GPU (see "virtual_devices" below).
 * If empty, it will create single virtual device taking all available
 * memory from the device.
 * For the concept of "visible" and "virtual" GPU, see the comments for
 * "visible_device_list" above for more information.
 * </pre>
 *
 * <code>repeated float memory_limit_mb = 1;</code>
 */
// NOTE(review): the dump duplicated the javadoc above; a sibling accessor
// declaration may have been lost here — verify against regenerated sources.
public int getMemoryLimitMbCount() {
  return memoryLimitMb_.size();
}
/**
 * Per "virtual" device memory limit, in MB. The number of elements in
 * the list is the number of virtual devices to create on the
 * corresponding visible GPU (see "virtual_devices" below).
 * If empty, it will create single virtual device taking all available
 * memory from the device.
 * For the concept of "visible" and "virtual" GPU, see the comments for
 * "visible_device_list" above for more information.
 *
 * repeated float memory_limit_mb = 1;
 *
 * Resets the field to the shared immutable empty list and clears the
 * "privately owned" bit so a later mutation copy-on-writes again.
 */
public Builder clearMemoryLimitMb() {
memoryLimitMb_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000001);
onChanged();
return this;
}
// Replaces this builder's unknown-field set (proto3 semantics).
public final Builder setUnknownFields(
final org.nd4j.shade.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFieldsProto3(unknownFields);
}
// Merges additional unknown fields into this builder's existing set.
public final Builder mergeUnknownFields(
final org.nd4j.shade.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:tensorflow.GPUOptions.Experimental.VirtualDevices)
}
// @@protoc_insertion_point(class_scope:tensorflow.GPUOptions.Experimental.VirtualDevices)
// Singleton default (all-fields-default) instance of VirtualDevices.
private static final org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices();
}
public static org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices getDefaultInstance() {
return DEFAULT_INSTANCE;
}
// Shared parser for VirtualDevices. Restored the generic type arguments that
// were lost (raw Parser/AbstractParser) so the parser is type-safe.
private static final org.nd4j.shade.protobuf.Parser<VirtualDevices>
PARSER = new org.nd4j.shade.protobuf.AbstractParser<VirtualDevices>() {
@java.lang.Override
public VirtualDevices parsePartialFrom(
org.nd4j.shade.protobuf.CodedInputStream input,
org.nd4j.shade.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.nd4j.shade.protobuf.InvalidProtocolBufferException {
// Parsing is performed by the message's stream constructor.
return new VirtualDevices(input, extensionRegistry);
}
};
// Static accessor for the shared parser. Restored the generic return type
// (was a raw Parser).
public static org.nd4j.shade.protobuf.Parser<VirtualDevices> parser() {
return PARSER;
}
@java.lang.Override
public org.nd4j.shade.protobuf.Parser<VirtualDevices> getParserForType() {
return PARSER;
}
// Instance-level accessor for the shared default instance.
public org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
private int bitField0_;
public static final int VIRTUAL_DEVICES_FIELD_NUMBER = 1;
// Repeated message field "virtual_devices". Restored the element type that was
// lost (raw List) to match the protoc-generated declaration.
private java.util.List<org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices> virtualDevices_;
/**
 * The multi virtual device settings. If empty (not set), it will create
 * single virtual device on each visible GPU, according to the settings
 * in "visible_device_list" above. Otherwise, the number of elements in the
 * list must be the same as the number of visible GPUs (after
 * "visible_device_list" filtering if it is set), and the string represented
 * device names (e.g. /device:GPU:<id>) will refer to the virtual
 * devices and have the <id> field assigned sequentially starting from 0,
 * according to the order they appear in this list and the "memory_limit"
 * list inside each element. For example,
 * visible_device_list = "1,0"
 * virtual_devices { memory_limit: 1GB memory_limit: 2GB }
 * virtual_devices {}
 * will create three virtual devices as:
 * /device:GPU:0 -> visible GPU 1 with 1GB memory
 * /device:GPU:1 -> visible GPU 1 with 2GB memory
 * /device:GPU:2 -> visible GPU 0 with all available memory
 * NOTE:
 * 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
 * at the same time.
 * 2. Currently this setting is per-process, not per-session. Using
 * different settings in different sessions within same process will
 * result in undefined behavior.
 *
 * repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
 *
 * @return the number of virtual_devices entries in this message.
 */
public int getVirtualDevicesCount() {
return virtualDevices_.size();
}
/**
 * The multi virtual device settings. If empty (not set), it will create
 * single virtual device on each visible GPU, according to the settings
 * in "visible_device_list" above. Otherwise, the number of elements in the
 * list must be the same as the number of visible GPUs (after
 * "visible_device_list" filtering if it is set), and the string represented
 * device names (e.g. /device:GPU:<id>) will refer to the virtual
 * devices and have the <id> field assigned sequentially starting from 0,
 * according to the order they appear in this list and the "memory_limit"
 * list inside each element. For example,
 * visible_device_list = "1,0"
 * virtual_devices { memory_limit: 1GB memory_limit: 2GB }
 * virtual_devices {}
 * will create three virtual devices as:
 * /device:GPU:0 -> visible GPU 1 with 1GB memory
 * /device:GPU:1 -> visible GPU 1 with 2GB memory
 * /device:GPU:2 -> visible GPU 0 with all available memory
 * NOTE:
 * 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
 * at the same time.
 * 2. Currently this setting is per-process, not per-session. Using
 * different settings in different sessions within same process will
 * result in undefined behavior.
 *
 * repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
 *
 * @param index zero-based position within the virtual_devices list.
 */
public org.tensorflow.framework.GPUOptions.Experimental.VirtualDevicesOrBuilder getVirtualDevicesOrBuilder(
int index) {
return virtualDevices_.get(index);
}
public static final int USE_UNIFIED_MEMORY_FIELD_NUMBER = 2;
private boolean useUnifiedMemory_;
/**
 * If true, uses CUDA unified memory for memory allocations. If
 * per_process_gpu_memory_fraction option is greater than 1.0, then unified
 * memory is used regardless of the value for this field. See comments for
 * per_process_gpu_memory_fraction field for more details and requirements
 * of the unified memory. This option is useful to oversubscribe memory if
 * multiple processes are sharing a single GPU while individually using less
 * than 1.0 per process memory fraction.
 *
 * bool use_unified_memory = 2;
 */
public boolean getUseUnifiedMemory() {
return useUnifiedMemory_;
}
// Memoized initialization state: -1 unknown, 0 not initialized, 1 initialized.
private byte memoizedIsInitialized = -1;
// proto3 has no required fields, so the answer is always true (cached after
// the first call).
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
memoizedIsInitialized = 1;
return true;
}
/**
 * Serializes this message to the wire: repeated virtual_devices as field 1,
 * use_unified_memory as field 2 (only when non-default, per proto3), then
 * any unknown fields carried through parsing.
 */
public void writeTo(org.nd4j.shade.protobuf.CodedOutputStream output)
throws java.io.IOException {
for (int i = 0; i < virtualDevices_.size(); i++) {
output.writeMessage(1, virtualDevices_.get(i));
}
if (useUnifiedMemory_ != false) {
output.writeBool(2, useUnifiedMemory_);
}
unknownFields.writeTo(output);
}
/**
 * Computes (and memoizes in memoizedSize) the serialized byte size of this
 * message; mirrors writeTo field-for-field, skipping proto3 default values.
 */
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
for (int i = 0; i < virtualDevices_.size(); i++) {
size += org.nd4j.shade.protobuf.CodedOutputStream
.computeMessageSize(1, virtualDevices_.get(i));
}
if (useUnifiedMemory_ != false) {
size += org.nd4j.shade.protobuf.CodedOutputStream
.computeBoolSize(2, useUnifiedMemory_);
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
/**
 * Structural equality: two Experimental messages are equal when their
 * virtual_devices lists, use_unified_memory flags, and unknown-field sets
 * all match.
 */
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
// Reflexive fast path.
if (obj == this) {
return true;
}
// Other types defer to the superclass comparison.
if (!(obj instanceof org.tensorflow.framework.GPUOptions.Experimental)) {
return super.equals(obj);
}
org.tensorflow.framework.GPUOptions.Experimental that = (org.tensorflow.framework.GPUOptions.Experimental) obj;
return getVirtualDevicesList().equals(that.getVirtualDevicesList())
&& getUseUnifiedMemory() == that.getUseUnifiedMemory()
&& unknownFields.equals(that.unknownFields);
}
/**
 * Hash code consistent with equals(); memoized in memoizedHashCode.
 * The 19/37/53/29 multiplier sequence is the standard protobuf-generated
 * scheme and must not change (serialized maps etc. may depend on stability
 * within a run).
 */
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
// Repeated field contributes only when non-empty, mirroring equals().
if (getVirtualDevicesCount() > 0) {
hash = (37 * hash) + VIRTUAL_DEVICES_FIELD_NUMBER;
hash = (53 * hash) + getVirtualDevicesList().hashCode();
}
hash = (37 * hash) + USE_UNIFIED_MEMORY_FIELD_NUMBER;
hash = (53 * hash) + org.nd4j.shade.protobuf.Internal.hashBoolean(
getUseUnifiedMemory());
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
// ---- Static parse entry points -------------------------------------------
// Byte-oriented overloads (ByteBuffer/ByteString/byte[]) delegate directly to
// PARSER; stream overloads go through GeneratedMessageV3's helpers, which
// rewrap protocol errors as IOException. The "Delimited" variants read a
// varint length prefix before the message bytes.
public static org.tensorflow.framework.GPUOptions.Experimental parseFrom(
java.nio.ByteBuffer data)
throws org.nd4j.shade.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.tensorflow.framework.GPUOptions.Experimental parseFrom(
java.nio.ByteBuffer data,
org.nd4j.shade.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.nd4j.shade.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.tensorflow.framework.GPUOptions.Experimental parseFrom(
org.nd4j.shade.protobuf.ByteString data)
throws org.nd4j.shade.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.tensorflow.framework.GPUOptions.Experimental parseFrom(
org.nd4j.shade.protobuf.ByteString data,
org.nd4j.shade.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.nd4j.shade.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.tensorflow.framework.GPUOptions.Experimental parseFrom(byte[] data)
throws org.nd4j.shade.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.tensorflow.framework.GPUOptions.Experimental parseFrom(
byte[] data,
org.nd4j.shade.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.nd4j.shade.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.tensorflow.framework.GPUOptions.Experimental parseFrom(java.io.InputStream input)
throws java.io.IOException {
return org.nd4j.shade.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.tensorflow.framework.GPUOptions.Experimental parseFrom(
java.io.InputStream input,
org.nd4j.shade.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.nd4j.shade.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.tensorflow.framework.GPUOptions.Experimental parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return org.nd4j.shade.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.tensorflow.framework.GPUOptions.Experimental parseDelimitedFrom(
java.io.InputStream input,
org.nd4j.shade.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.nd4j.shade.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.tensorflow.framework.GPUOptions.Experimental parseFrom(
org.nd4j.shade.protobuf.CodedInputStream input)
throws java.io.IOException {
return org.nd4j.shade.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.tensorflow.framework.GPUOptions.Experimental parseFrom(
org.nd4j.shade.protobuf.CodedInputStream input,
org.nd4j.shade.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return org.nd4j.shade.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
// ---- Builder factory methods ---------------------------------------------
public Builder newBuilderForType() { return newBuilder(); }
// Fresh builder with all fields at their defaults.
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
// Builder pre-populated with the given prototype's field values.
public static Builder newBuilder(org.tensorflow.framework.GPUOptions.Experimental prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
// Skip the merge for the default instance — nothing to copy.
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
org.nd4j.shade.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code tensorflow.GPUOptions.Experimental}
*/
public static final class Builder extends
org.nd4j.shade.protobuf.GeneratedMessageV3.Builder implements
// @@protoc_insertion_point(builder_implements:tensorflow.GPUOptions.Experimental)
org.tensorflow.framework.GPUOptions.ExperimentalOrBuilder {
// Descriptor for tensorflow.GPUOptions.Experimental, from ConfigProtos.
public static final org.nd4j.shade.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_GPUOptions_Experimental_descriptor;
}
// Reflection table mapping field descriptors to the generated accessors.
protected org.nd4j.shade.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_GPUOptions_Experimental_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.tensorflow.framework.GPUOptions.Experimental.class, org.tensorflow.framework.GPUOptions.Experimental.Builder.class);
}
// Construct using org.tensorflow.framework.GPUOptions.Experimental.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
// Parent-aware constructor used for nested-builder change propagation.
private Builder(
org.nd4j.shade.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
// Eagerly creates nested field builders when the runtime requires it
// (alwaysUseFieldBuilders is set in some protobuf configurations).
private void maybeForceBuilderInitialization() {
if (org.nd4j.shade.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
getVirtualDevicesFieldBuilder();
}
}
// Resets every field to its proto3 default. The repeated field is cleared
// either on the plain list or on the nested field builder, whichever is
// currently active.
public Builder clear() {
super.clear();
if (virtualDevicesBuilder_ == null) {
virtualDevices_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000001);
} else {
virtualDevicesBuilder_.clear();
}
useUnifiedMemory_ = false;
return this;
}
public org.nd4j.shade.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_GPUOptions_Experimental_descriptor;
}
public org.tensorflow.framework.GPUOptions.Experimental getDefaultInstanceForType() {
return org.tensorflow.framework.GPUOptions.Experimental.getDefaultInstance();
}
// Builds the message, throwing if it is not fully initialized (cannot happen
// for proto3, but kept for the Message.Builder contract).
public org.tensorflow.framework.GPUOptions.Experimental build() {
org.tensorflow.framework.GPUOptions.Experimental result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
/**
 * Builds a message from the builder's current state without the
 * initialization check. Removed the unused local {@code from_bitField0_}
 * (dead code in the generated template for this message shape).
 */
public org.tensorflow.framework.GPUOptions.Experimental buildPartial() {
org.tensorflow.framework.GPUOptions.Experimental result = new org.tensorflow.framework.GPUOptions.Experimental(this);
int to_bitField0_ = 0;
if (virtualDevicesBuilder_ == null) {
// Freeze the plain list and drop the "privately owned" bit so a later
// mutation of this builder copy-on-writes instead of aliasing the message.
if (((bitField0_ & 0x00000001) == 0x00000001)) {
virtualDevices_ = java.util.Collections.unmodifiableList(virtualDevices_);
bitField0_ = (bitField0_ & ~0x00000001);
}
result.virtualDevices_ = virtualDevices_;
} else {
result.virtualDevices_ = virtualDevicesBuilder_.build();
}
result.useUnifiedMemory_ = useUnifiedMemory_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
// ---- Covariant overrides delegating to GeneratedMessageV3.Builder --------
// These exist only to narrow the return type to this Builder.
public Builder clone() {
return (Builder) super.clone();
}
public Builder setField(
org.nd4j.shade.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return (Builder) super.setField(field, value);
}
public Builder clearField(
org.nd4j.shade.protobuf.Descriptors.FieldDescriptor field) {
return (Builder) super.clearField(field);
}
public Builder clearOneof(
org.nd4j.shade.protobuf.Descriptors.OneofDescriptor oneof) {
return (Builder) super.clearOneof(oneof);
}
public Builder setRepeatedField(
org.nd4j.shade.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return (Builder) super.setRepeatedField(field, index, value);
}
public Builder addRepeatedField(
org.nd4j.shade.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return (Builder) super.addRepeatedField(field, value);
}
// Dispatches generic Message merges: Experimental instances take the typed
// fast path, anything else falls back to reflective field-by-field merging.
public Builder mergeFrom(org.nd4j.shade.protobuf.Message other) {
if (!(other instanceof org.tensorflow.framework.GPUOptions.Experimental)) {
super.mergeFrom(other);
return this;
}
return mergeFrom((org.tensorflow.framework.GPUOptions.Experimental) other);
}
/**
 * Merges another Experimental message into this builder: appends its
 * virtual_devices entries and overwrites scalar fields that are non-default
 * in {@code other}. The repeated-field merge has two modes depending on
 * whether the nested field builder has been materialized.
 */
public Builder mergeFrom(org.tensorflow.framework.GPUOptions.Experimental other) {
if (other == org.tensorflow.framework.GPUOptions.Experimental.getDefaultInstance()) return this;
if (virtualDevicesBuilder_ == null) {
if (!other.virtualDevices_.isEmpty()) {
if (virtualDevices_.isEmpty()) {
// Adopt the other message's immutable list directly (no copy) and mark
// it as not privately owned.
virtualDevices_ = other.virtualDevices_;
bitField0_ = (bitField0_ & ~0x00000001);
} else {
ensureVirtualDevicesIsMutable();
virtualDevices_.addAll(other.virtualDevices_);
}
onChanged();
}
} else {
if (!other.virtualDevices_.isEmpty()) {
if (virtualDevicesBuilder_.isEmpty()) {
// Empty builder: discard it, adopt the list, and re-create the builder
// only if the runtime forces field builders.
virtualDevicesBuilder_.dispose();
virtualDevicesBuilder_ = null;
virtualDevices_ = other.virtualDevices_;
bitField0_ = (bitField0_ & ~0x00000001);
virtualDevicesBuilder_ =
org.nd4j.shade.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
getVirtualDevicesFieldBuilder() : null;
} else {
virtualDevicesBuilder_.addAllMessages(other.virtualDevices_);
}
}
}
// proto3 scalar merge: only non-default values overwrite.
if (other.getUseUnifiedMemory() != false) {
setUseUnifiedMemory(other.getUseUnifiedMemory());
}
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
// proto3 has no required fields, so an Experimental builder is always initialized.
public final boolean isInitialized() {
return true;
}
/**
 * Parses an Experimental message from {@code input} and merges it into this
 * builder. On a parse failure, fields decoded before the error are still
 * merged (via the finally block) before the exception is rethrown.
 */
public Builder mergeFrom(
org.nd4j.shade.protobuf.CodedInputStream input,
org.nd4j.shade.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.tensorflow.framework.GPUOptions.Experimental parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (org.nd4j.shade.protobuf.InvalidProtocolBufferException e) {
// Recover the partially-decoded message so its fields are not lost.
parsedMessage = (org.tensorflow.framework.GPUOptions.Experimental) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// Builder storage for repeated message field "virtual_devices". Restored the
// generic element type that was lost (raw List/ArrayList).
private java.util.List<org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices> virtualDevices_ =
java.util.Collections.emptyList();
// Copy-on-first-write: replace the shared immutable empty list with a private
// ArrayList the first time the field is mutated; bit 0 of bitField0_ records
// private ownership.
private void ensureVirtualDevicesIsMutable() {
if (!((bitField0_ & 0x00000001) == 0x00000001)) {
virtualDevices_ = new java.util.ArrayList<org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices>(virtualDevices_);
bitField0_ |= 0x00000001;
}
}
// Nested field builder; null until first requested (plain-list mode is used
// until then).
private org.nd4j.shade.protobuf.RepeatedFieldBuilderV3<
org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices, org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices.Builder, org.tensorflow.framework.GPUOptions.Experimental.VirtualDevicesOrBuilder> virtualDevicesBuilder_;
/**
 * The multi virtual device settings. If empty (not set), it will create
 * single virtual device on each visible GPU, according to the settings
 * in "visible_device_list" above. Otherwise, the number of elements in the
 * list must be the same as the number of visible GPUs (after
 * "visible_device_list" filtering if it is set), and the string represented
 * device names (e.g. /device:GPU:<id>) will refer to the virtual
 * devices and have the <id> field assigned sequentially starting from 0,
 * according to the order they appear in this list and the "memory_limit"
 * list inside each element. For example,
 * visible_device_list = "1,0"
 * virtual_devices { memory_limit: 1GB memory_limit: 2GB }
 * virtual_devices {}
 * will create three virtual devices as:
 * /device:GPU:0 -> visible GPU 1 with 1GB memory
 * /device:GPU:1 -> visible GPU 1 with 2GB memory
 * /device:GPU:2 -> visible GPU 0 with all available memory
 * NOTE:
 * 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
 * at the same time.
 * 2. Currently this setting is per-process, not per-session. Using
 * different settings in different sessions within same process will
 * result in undefined behavior.
 *
 * repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
 *
 * @return the entry count, read from whichever store (plain list or nested
 *         field builder) is currently active.
 */
public int getVirtualDevicesCount() {
if (virtualDevicesBuilder_ == null) {
return virtualDevices_.size();
} else {
return virtualDevicesBuilder_.getCount();
}
}
/**
 * The multi virtual device settings. If empty (not set), it will create
 * single virtual device on each visible GPU, according to the settings
 * in "visible_device_list" above. Otherwise, the number of elements in the
 * list must be the same as the number of visible GPUs (after
 * "visible_device_list" filtering if it is set), and the string represented
 * device names (e.g. /device:GPU:<id>) will refer to the virtual
 * devices and have the <id> field assigned sequentially starting from 0,
 * according to the order they appear in this list and the "memory_limit"
 * list inside each element. For example,
 * visible_device_list = "1,0"
 * virtual_devices { memory_limit: 1GB memory_limit: 2GB }
 * virtual_devices {}
 * will create three virtual devices as:
 * /device:GPU:0 -> visible GPU 1 with 1GB memory
 * /device:GPU:1 -> visible GPU 1 with 2GB memory
 * /device:GPU:2 -> visible GPU 0 with all available memory
 * NOTE:
 * 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
 * at the same time.
 * 2. Currently this setting is per-process, not per-session. Using
 * different settings in different sessions within same process will
 * result in undefined behavior.
 *
 * repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
 *
 * Replaces the entry at {@code index}; rejects null values.
 */
public Builder setVirtualDevices(
int index, org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices value) {
if (virtualDevicesBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureVirtualDevicesIsMutable();
virtualDevices_.set(index, value);
onChanged();
} else {
virtualDevicesBuilder_.setMessage(index, value);
}
return this;
}
/**
 * The multi virtual device settings. If empty (not set), it will create
 * single virtual device on each visible GPU, according to the settings
 * in "visible_device_list" above. Otherwise, the number of elements in the
 * list must be the same as the number of visible GPUs (after
 * "visible_device_list" filtering if it is set), and the string represented
 * device names (e.g. /device:GPU:<id>) will refer to the virtual
 * devices and have the <id> field assigned sequentially starting from 0,
 * according to the order they appear in this list and the "memory_limit"
 * list inside each element. For example,
 * visible_device_list = "1,0"
 * virtual_devices { memory_limit: 1GB memory_limit: 2GB }
 * virtual_devices {}
 * will create three virtual devices as:
 * /device:GPU:0 -> visible GPU 1 with 1GB memory
 * /device:GPU:1 -> visible GPU 1 with 2GB memory
 * /device:GPU:2 -> visible GPU 0 with all available memory
 * NOTE:
 * 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
 * at the same time.
 * 2. Currently this setting is per-process, not per-session. Using
 * different settings in different sessions within same process will
 * result in undefined behavior.
 *
 * repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
 *
 * Appends an entry to the end of the list; rejects null values.
 */
public Builder addVirtualDevices(org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices value) {
if (virtualDevicesBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureVirtualDevicesIsMutable();
virtualDevices_.add(value);
onChanged();
} else {
virtualDevicesBuilder_.addMessage(value);
}
return this;
}
/**
 * The multi virtual device settings. If empty (not set), it will create
 * single virtual device on each visible GPU, according to the settings
 * in "visible_device_list" above. Otherwise, the number of elements in the
 * list must be the same as the number of visible GPUs (after
 * "visible_device_list" filtering if it is set), and the string represented
 * device names (e.g. /device:GPU:<id>) will refer to the virtual
 * devices and have the <id> field assigned sequentially starting from 0,
 * according to the order they appear in this list and the "memory_limit"
 * list inside each element. For example,
 * visible_device_list = "1,0"
 * virtual_devices { memory_limit: 1GB memory_limit: 2GB }
 * virtual_devices {}
 * will create three virtual devices as:
 * /device:GPU:0 -> visible GPU 1 with 1GB memory
 * /device:GPU:1 -> visible GPU 1 with 2GB memory
 * /device:GPU:2 -> visible GPU 0 with all available memory
 * NOTE:
 * 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
 * at the same time.
 * 2. Currently this setting is per-process, not per-session. Using
 * different settings in different sessions within same process will
 * result in undefined behavior.
 *
 * repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
 *
 * Inserts an entry at {@code index}, shifting later entries; rejects null values.
 */
public Builder addVirtualDevices(
int index, org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices value) {
if (virtualDevicesBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureVirtualDevicesIsMutable();
virtualDevices_.add(index, value);
onChanged();
} else {
virtualDevicesBuilder_.addMessage(index, value);
}
return this;
}
/**
 * Returns the message builder for the virtual_devices entry at {@code index},
 * allowing that element to be modified in place.
 *
 * The multi virtual device settings. If empty (not set), it will create
 * single virtual device on each visible GPU, according to the settings
 * in "visible_device_list" above. Otherwise, the number of elements in the
 * list must be the same as the number of visible GPUs (after
 * "visible_device_list" filtering if it is set), and the string represented
 * device names (e.g. /device:GPU:&lt;id&gt;) will refer to the virtual
 * devices and have the &lt;id&gt; field assigned sequentially starting from 0,
 * according to the order they appear in this list and the "memory_limit"
 * list inside each element. For example,
 *   visible_device_list = "1,0"
 *   virtual_devices { memory_limit: 1GB memory_limit: 2GB }
 *   virtual_devices {}
 * will create three virtual devices as:
 *   /device:GPU:0 -> visible GPU 1 with 1GB memory
 *   /device:GPU:1 -> visible GPU 1 with 2GB memory
 *   /device:GPU:2 -> visible GPU 0 with all available memory
 * NOTE:
 * 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
 *    at the same time.
 * 2. Currently this setting is per-process, not per-session. Using
 *    different settings in different sessions within same process will
 *    result in undefined behavior.
 *
 * repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
 */
public org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices.Builder getVirtualDevicesBuilder(
int index) {
return getVirtualDevicesFieldBuilder().getBuilder(index);
}
/**
 * Returns a read view of the virtual_devices entry at {@code index}: either
 * the built message (plain-list mode) or the live builder (nested-builder
 * mode). See the field comment on virtual_devices for the full semantics.
 *
 * repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
 */
public org.tensorflow.framework.GPUOptions.Experimental.VirtualDevicesOrBuilder getVirtualDevicesOrBuilder(
    int index) {
  return virtualDevicesBuilder_ == null
      ? virtualDevices_.get(index)
      : virtualDevicesBuilder_.getMessageOrBuilder(index);
}
/**
 * Resets use_unified_memory to its proto3 default (false).
 *
 * If true, uses CUDA unified memory for memory allocations. If
 * per_process_gpu_memory_fraction option is greater than 1.0, then unified
 * memory is used regardless of the value for this field. See comments for
 * per_process_gpu_memory_fraction field for more details and requirements
 * of the unified memory. This option is useful to oversubscribe memory if
 * multiple processes are sharing a single GPU while individually using less
 * than 1.0 per process memory fraction.
 *
 * bool use_unified_memory = 2;
 */
public Builder clearUseUnifiedMemory() {
useUnifiedMemory_ = false;
onChanged();
return this;
}
// Replaces this builder's unknown-field set (proto3 semantics).
public final Builder setUnknownFields(
final org.nd4j.shade.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFieldsProto3(unknownFields);
}
// Merges the given unknown fields into this builder's existing set.
public final Builder mergeUnknownFields(
final org.nd4j.shade.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:tensorflow.GPUOptions.Experimental)
}
// @@protoc_insertion_point(class_scope:tensorflow.GPUOptions.Experimental)
// Shared immutable default instance, created eagerly at class-load time.
private static final org.tensorflow.framework.GPUOptions.Experimental DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.tensorflow.framework.GPUOptions.Experimental();
}
// Returns the singleton default (all-fields-unset) Experimental message.
public static org.tensorflow.framework.GPUOptions.Experimental getDefaultInstance() {
return DEFAULT_INSTANCE;
}
// Singleton wire-format parser for Experimental. The generic type arguments
// were stripped from this copy of the generated file (same <...>-mangling seen
// elsewhere); restored here so the field is not a raw type.
private static final org.nd4j.shade.protobuf.Parser<Experimental>
    PARSER = new org.nd4j.shade.protobuf.AbstractParser<Experimental>() {
  @java.lang.Override
  public Experimental parsePartialFrom(
      org.nd4j.shade.protobuf.CodedInputStream input,
      org.nd4j.shade.protobuf.ExtensionRegistryLite extensionRegistry)
      throws org.nd4j.shade.protobuf.InvalidProtocolBufferException {
    // Delegate to the parsing constructor, which reads the tag stream.
    return new Experimental(input, extensionRegistry);
  }
};
/** Returns the type-safe parser for Experimental messages (was a raw type). */
public static org.nd4j.shade.protobuf.Parser<Experimental> parser() {
  return PARSER;
}
@java.lang.Override
public org.nd4j.shade.protobuf.Parser<Experimental> getParserForType() {
  // Covariant return restored: the raw Parser here was generics-stripping damage.
  return PARSER;
}
/** Returns the shared immutable default instance for this message type. */
@java.lang.Override
public org.tensorflow.framework.GPUOptions.Experimental getDefaultInstanceForType() {
  return DEFAULT_INSTANCE;
}
}
public static final int PER_PROCESS_GPU_MEMORY_FRACTION_FIELD_NUMBER = 1;
// Backing field; proto3 default is 0.0 (no explicit fraction set).
private double perProcessGpuMemoryFraction_;
/**
*
* Fraction of the available GPU memory to allocate for each process.
* 1 means to allocate all of the GPU memory, 0.5 means the process
* allocates up to ~50% of the available GPU memory.
* GPU memory is pre-allocated unless the allow_growth option is enabled.
* If greater than 1.0, uses CUDA unified memory to potentially oversubscribe
* the amount of memory available on the GPU device by using host memory as a
* swap space. Accessing memory not available on the device will be
* significantly slower as that would require memory transfer between the host
* and the device. Options to reduce the memory requirement should be
* considered before enabling this option as this may come with a negative
* performance impact. Oversubscription using the unified memory requires
* Pascal class or newer GPUs and it is currently only supported on the Linux
* operating system. See
* https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#um-requirements
* for the detailed requirements.
*
*
* double per_process_gpu_memory_fraction = 1;
*/
public double getPerProcessGpuMemoryFraction() {
return perProcessGpuMemoryFraction_;
}
public static final int ALLOW_GROWTH_FIELD_NUMBER = 4;
// Backing field; proto3 default is false.
private boolean allowGrowth_;
/**
*
* If true, the allocator does not pre-allocate the entire specified
* GPU memory region, instead starting small and growing as needed.
*
*
* bool allow_growth = 4;
*/
public boolean getAllowGrowth() {
return allowGrowth_;
}
public static final int ALLOCATOR_TYPE_FIELD_NUMBER = 2;
// Holds either a String or a ByteString; lazily converted and cached by the
// bytes accessor below (standard protobuf-generated string storage).
private volatile java.lang.Object allocatorType_;
/**
 * The type of GPU allocation strategy to use.
 * Allowed values:
 * "": The empty string (default) uses a system-chosen default
 *     which may change over time.
 * "BFC": A "Best-fit with coalescing" algorithm, simplified from a
 *        version of dlmalloc.
 *
 * string allocator_type = 2;
 */
public org.nd4j.shade.protobuf.ByteString
getAllocatorTypeBytes() {
java.lang.Object ref = allocatorType_;
if (ref instanceof java.lang.String) {
// First bytes-access after a String was stored: encode to UTF-8 once and
// cache the ByteString so later calls skip the conversion.
org.nd4j.shade.protobuf.ByteString b =
org.nd4j.shade.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
allocatorType_ = b;
return b;
} else {
return (org.nd4j.shade.protobuf.ByteString) ref;
}
}
public static final int DEFERRED_DELETION_BYTES_FIELD_NUMBER = 3;
// Backing field; proto3 default is 0 (the system picks a reasonable value).
private long deferredDeletionBytes_;
/**
*
* Delay deletion of up to this many bytes to reduce the number of
* interactions with gpu driver code. If 0, the system chooses
* a reasonable default (several MBs).
*
*
* int64 deferred_deletion_bytes = 3;
*/
public long getDeferredDeletionBytes() {
return deferredDeletionBytes_;
}
public static final int VISIBLE_DEVICE_LIST_FIELD_NUMBER = 5;
// Holds either a String or a ByteString; lazily converted and cached by the
// bytes accessor below (standard protobuf-generated string storage).
private volatile java.lang.Object visibleDeviceList_;
/**
 * A comma-separated list of GPU ids that determines the 'visible'
 * to 'virtual' mapping of GPU devices. For example, if TensorFlow
 * can see 8 GPU devices in the process, and one wanted to map
 * visible GPU devices 5 and 3 as "/device:GPU:0", and "/device:GPU:1",
 * then one would specify this field as "5,3". This field is similar in
 * spirit to the CUDA_VISIBLE_DEVICES environment variable, except
 * it applies to the visible GPU devices in the process.
 * NOTE:
 * 1. The GPU driver provides the process with the visible GPUs
 *    in an order which is not guaranteed to have any correlation to
 *    the *physical* GPU id in the machine. This field is used for
 *    remapping "visible" to "virtual", which means this operates only
 *    after the process starts. Users are required to use vendor
 *    specific mechanisms (e.g., CUDA_VISIBLE_DEVICES) to control the
 *    physical to visible device mapping prior to invoking TensorFlow.
 * 2. In the code, the ids in this list are also called "CUDA GPU id"s,
 *    and the 'virtual' ids of GPU devices (i.e. the ids in the device
 *    name "/device:GPU:&lt;id&gt;") are also called "TF GPU id"s. Please
 *    refer to third_party/tensorflow/core/common_runtime/gpu/gpu_id.h
 *    for more information.
 *
 * string visible_device_list = 5;
 */
public org.nd4j.shade.protobuf.ByteString
getVisibleDeviceListBytes() {
java.lang.Object ref = visibleDeviceList_;
if (ref instanceof java.lang.String) {
// First bytes-access after a String was stored: encode to UTF-8 once and
// cache the ByteString so later calls skip the conversion.
org.nd4j.shade.protobuf.ByteString b =
org.nd4j.shade.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
visibleDeviceList_ = b;
return b;
} else {
return (org.nd4j.shade.protobuf.ByteString) ref;
}
}
public static final int POLLING_ACTIVE_DELAY_USECS_FIELD_NUMBER = 6;
// Backing field; proto3 default is 0 (runtime substitutes a non-zero default).
private int pollingActiveDelayUsecs_;
/**
*
* In the event polling loop sleep this many microseconds between
* PollEvents calls, when the queue is not empty. If value is not
* set or set to 0, gets set to a non-zero default.
*
*
* int32 polling_active_delay_usecs = 6;
*/
public int getPollingActiveDelayUsecs() {
return pollingActiveDelayUsecs_;
}
public static final int POLLING_INACTIVE_DELAY_MSECS_FIELD_NUMBER = 7;
// Backing field; kept for wire compatibility even though the runtime ignores it.
private int pollingInactiveDelayMsecs_;
/**
*
* This field is deprecated and ignored.
*
*
* int32 polling_inactive_delay_msecs = 7;
*/
public int getPollingInactiveDelayMsecs() {
return pollingInactiveDelayMsecs_;
}
public static final int FORCE_GPU_COMPATIBLE_FIELD_NUMBER = 8;
// Backing field; proto3 default is false.
private boolean forceGpuCompatible_;
/**
*
* Force all tensors to be gpu_compatible. On a GPU-enabled TensorFlow,
* enabling this option forces all CPU tensors to be allocated with Cuda
* pinned memory. Normally, TensorFlow will infer which tensors should be
* allocated as the pinned memory. But in case where the inference is
* incomplete, this option can significantly speed up the cross-device memory
* copy performance as long as it fits the memory.
* Note that this option is not something that should be
* enabled by default for unknown or very large models, since all Cuda pinned
* memory is unpageable, having too much pinned memory might negatively impact
* the overall host system performance.
*
*
* bool force_gpu_compatible = 8;
*/
public boolean getForceGpuCompatible() {
return forceGpuCompatible_;
}
public static final int EXPERIMENTAL_FIELD_NUMBER = 9;
// Null when the field is unset; presence is reported by hasExperimental().
private org.tensorflow.framework.GPUOptions.Experimental experimental_;
/**
 * Returns a read view of the experimental sub-message (the default instance
 * when unset).
 *
 * Everything inside experimental is subject to change and is not subject
 * to API stability guarantees in
 * https://www.tensorflow.org/guide/version_compat.
 *
 * .tensorflow.GPUOptions.Experimental experimental = 9;
 */
public org.tensorflow.framework.GPUOptions.ExperimentalOrBuilder getExperimentalOrBuilder() {
return getExperimental();
}
// -1 = not yet computed, 0 = known uninitialized, 1 = known initialized.
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
  byte cached = memoizedIsInitialized;
  if (cached == 1) {
    return true;
  }
  if (cached == 0) {
    return false;
  }
  // Proto3 message with no required fields: always initialized; memoize it.
  memoizedIsInitialized = 1;
  return true;
}
// Serializes this message in ascending field-number order. Per proto3
// semantics, scalar fields equal to their default value are skipped entirely.
public void writeTo(org.nd4j.shade.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (perProcessGpuMemoryFraction_ != 0D) {
output.writeDouble(1, perProcessGpuMemoryFraction_);
}
if (!getAllocatorTypeBytes().isEmpty()) {
org.nd4j.shade.protobuf.GeneratedMessageV3.writeString(output, 2, allocatorType_);
}
if (deferredDeletionBytes_ != 0L) {
output.writeInt64(3, deferredDeletionBytes_);
}
if (allowGrowth_ != false) {
output.writeBool(4, allowGrowth_);
}
if (!getVisibleDeviceListBytes().isEmpty()) {
org.nd4j.shade.protobuf.GeneratedMessageV3.writeString(output, 5, visibleDeviceList_);
}
if (pollingActiveDelayUsecs_ != 0) {
output.writeInt32(6, pollingActiveDelayUsecs_);
}
if (pollingInactiveDelayMsecs_ != 0) {
output.writeInt32(7, pollingInactiveDelayMsecs_);
}
if (forceGpuCompatible_ != false) {
output.writeBool(8, forceGpuCompatible_);
}
if (experimental_ != null) {
output.writeMessage(9, getExperimental());
}
// Unknown fields preserved from parsing are re-emitted at the end.
unknownFields.writeTo(output);
}
// Computes (and memoizes) the serialized byte size. The per-field conditions
// must mirror writeTo exactly so the size matches the bytes actually written.
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (perProcessGpuMemoryFraction_ != 0D) {
size += org.nd4j.shade.protobuf.CodedOutputStream
.computeDoubleSize(1, perProcessGpuMemoryFraction_);
}
if (!getAllocatorTypeBytes().isEmpty()) {
size += org.nd4j.shade.protobuf.GeneratedMessageV3.computeStringSize(2, allocatorType_);
}
if (deferredDeletionBytes_ != 0L) {
size += org.nd4j.shade.protobuf.CodedOutputStream
.computeInt64Size(3, deferredDeletionBytes_);
}
if (allowGrowth_ != false) {
size += org.nd4j.shade.protobuf.CodedOutputStream
.computeBoolSize(4, allowGrowth_);
}
if (!getVisibleDeviceListBytes().isEmpty()) {
size += org.nd4j.shade.protobuf.GeneratedMessageV3.computeStringSize(5, visibleDeviceList_);
}
if (pollingActiveDelayUsecs_ != 0) {
size += org.nd4j.shade.protobuf.CodedOutputStream
.computeInt32Size(6, pollingActiveDelayUsecs_);
}
if (pollingInactiveDelayMsecs_ != 0) {
size += org.nd4j.shade.protobuf.CodedOutputStream
.computeInt32Size(7, pollingInactiveDelayMsecs_);
}
if (forceGpuCompatible_ != false) {
size += org.nd4j.shade.protobuf.CodedOutputStream
.computeBoolSize(8, forceGpuCompatible_);
}
if (experimental_ != null) {
size += org.nd4j.shade.protobuf.CodedOutputStream
.computeMessageSize(9, getExperimental());
}
size += unknownFields.getSerializedSize();
// Message is immutable, so the computed size can be cached indefinitely.
memoizedSize = size;
return size;
}
/**
 * Field-by-field value equality, including the unknown-field set. Doubles are
 * compared via their raw bit patterns (protobuf convention), so NaN == NaN
 * and 0.0 != -0.0 here.
 */
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
  if (obj == this) {
    return true;
  }
  if (!(obj instanceof org.tensorflow.framework.GPUOptions)) {
    return super.equals(obj);
  }
  org.tensorflow.framework.GPUOptions other = (org.tensorflow.framework.GPUOptions) obj;
  if (java.lang.Double.doubleToLongBits(getPerProcessGpuMemoryFraction())
      != java.lang.Double.doubleToLongBits(other.getPerProcessGpuMemoryFraction())) {
    return false;
  }
  if (getAllowGrowth() != other.getAllowGrowth()) {
    return false;
  }
  if (!getAllocatorType().equals(other.getAllocatorType())) {
    return false;
  }
  if (getDeferredDeletionBytes() != other.getDeferredDeletionBytes()) {
    return false;
  }
  if (!getVisibleDeviceList().equals(other.getVisibleDeviceList())) {
    return false;
  }
  if (getPollingActiveDelayUsecs() != other.getPollingActiveDelayUsecs()) {
    return false;
  }
  if (getPollingInactiveDelayMsecs() != other.getPollingInactiveDelayMsecs()) {
    return false;
  }
  if (getForceGpuCompatible() != other.getForceGpuCompatible()) {
    return false;
  }
  if (hasExperimental() != other.hasExperimental()) {
    return false;
  }
  if (hasExperimental() && !getExperimental().equals(other.getExperimental())) {
    return false;
  }
  return unknownFields.equals(other.unknownFields);
}
// Hash mixes each set field with its field number using the protobuf-generated
// 37/53 multiplier scheme; the result is memoized since the message is
// immutable. The exact constants must not change: serialized maps and caches
// elsewhere may depend on stable hashes within a process.
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
hash = (37 * hash) + PER_PROCESS_GPU_MEMORY_FRACTION_FIELD_NUMBER;
hash = (53 * hash) + org.nd4j.shade.protobuf.Internal.hashLong(
java.lang.Double.doubleToLongBits(getPerProcessGpuMemoryFraction()));
hash = (37 * hash) + ALLOW_GROWTH_FIELD_NUMBER;
hash = (53 * hash) + org.nd4j.shade.protobuf.Internal.hashBoolean(
getAllowGrowth());
hash = (37 * hash) + ALLOCATOR_TYPE_FIELD_NUMBER;
hash = (53 * hash) + getAllocatorType().hashCode();
hash = (37 * hash) + DEFERRED_DELETION_BYTES_FIELD_NUMBER;
hash = (53 * hash) + org.nd4j.shade.protobuf.Internal.hashLong(
getDeferredDeletionBytes());
hash = (37 * hash) + VISIBLE_DEVICE_LIST_FIELD_NUMBER;
hash = (53 * hash) + getVisibleDeviceList().hashCode();
hash = (37 * hash) + POLLING_ACTIVE_DELAY_USECS_FIELD_NUMBER;
hash = (53 * hash) + getPollingActiveDelayUsecs();
hash = (37 * hash) + POLLING_INACTIVE_DELAY_MSECS_FIELD_NUMBER;
hash = (53 * hash) + getPollingInactiveDelayMsecs();
hash = (37 * hash) + FORCE_GPU_COMPATIBLE_FIELD_NUMBER;
hash = (53 * hash) + org.nd4j.shade.protobuf.Internal.hashBoolean(
getForceGpuCompatible());
if (hasExperimental()) {
hash = (37 * hash) + EXPERIMENTAL_FIELD_NUMBER;
hash = (53 * hash) + getExperimental().hashCode();
}
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.tensorflow.framework.GPUOptions parseFrom(
java.nio.ByteBuffer data)
throws org.nd4j.shade.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.tensorflow.framework.GPUOptions parseFrom(
java.nio.ByteBuffer data,
org.nd4j.shade.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.nd4j.shade.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.tensorflow.framework.GPUOptions parseFrom(
org.nd4j.shade.protobuf.ByteString data)
throws org.nd4j.shade.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.tensorflow.framework.GPUOptions parseFrom(
org.nd4j.shade.protobuf.ByteString data,
org.nd4j.shade.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.nd4j.shade.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.tensorflow.framework.GPUOptions parseFrom(byte[] data)
throws org.nd4j.shade.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.tensorflow.framework.GPUOptions parseFrom(
byte[] data,
org.nd4j.shade.protobuf.ExtensionRegistryLite extensionRegistry)
throws org.nd4j.shade.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
/** Parses one message from {@code input}, reading until EOF (not length-delimited). */
public static org.tensorflow.framework.GPUOptions parseFrom(java.io.InputStream input)
    throws java.io.IOException {
  return org.nd4j.shade.protobuf.GeneratedMessageV3
      .parseWithIOException(PARSER, input);
}
/** Parses one message from {@code input}, resolving extensions against {@code extensionRegistry}. */
public static org.tensorflow.framework.GPUOptions parseFrom(
    java.io.InputStream input,
    org.nd4j.shade.protobuf.ExtensionRegistryLite extensionRegistry)
    throws java.io.IOException {
  return org.nd4j.shade.protobuf.GeneratedMessageV3
      .parseWithIOException(PARSER, input, extensionRegistry);
}
/** Parses one length-prefixed message from {@code input} (the writeDelimitedTo format). */
public static org.tensorflow.framework.GPUOptions parseDelimitedFrom(java.io.InputStream input)
    throws java.io.IOException {
  return org.nd4j.shade.protobuf.GeneratedMessageV3
      .parseDelimitedWithIOException(PARSER, input);
}
/** Parses one length-prefixed message, resolving extensions against {@code extensionRegistry}. */
public static org.tensorflow.framework.GPUOptions parseDelimitedFrom(
    java.io.InputStream input,
    org.nd4j.shade.protobuf.ExtensionRegistryLite extensionRegistry)
    throws java.io.IOException {
  return org.nd4j.shade.protobuf.GeneratedMessageV3
      .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
/** Parses a {@code GPUOptions} message from an already-opened {@code CodedInputStream}. */
public static org.tensorflow.framework.GPUOptions parseFrom(
    org.nd4j.shade.protobuf.CodedInputStream input)
    throws java.io.IOException {
  return org.nd4j.shade.protobuf.GeneratedMessageV3
      .parseWithIOException(PARSER, input);
}
/** Parses from a {@code CodedInputStream}, resolving extensions against {@code extensionRegistry}. */
public static org.tensorflow.framework.GPUOptions parseFrom(
    org.nd4j.shade.protobuf.CodedInputStream input,
    org.nd4j.shade.protobuf.ExtensionRegistryLite extensionRegistry)
    throws java.io.IOException {
  return org.nd4j.shade.protobuf.GeneratedMessageV3
      .parseWithIOException(PARSER, input, extensionRegistry);
}
public Builder newBuilderForType() { return newBuilder(); }
/** Creates a new builder with all fields at their proto3 defaults. */
public static Builder newBuilder() {
  return DEFAULT_INSTANCE.toBuilder();
}
/** Creates a new builder pre-populated with the fields of {@code prototype}. */
public static Builder newBuilder(org.tensorflow.framework.GPUOptions prototype) {
  return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
/**
 * Converts this message into a builder carrying the same field values.
 * The shared default instance is all-defaults, so it is not copied —
 * a plain empty builder is returned instead.
 */
public Builder toBuilder() {
  if (this == DEFAULT_INSTANCE) {
    return new Builder();
  }
  return new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
    org.nd4j.shade.protobuf.GeneratedMessageV3.BuilderParent parent) {
  // Used internally by nested-builder plumbing so child builders can notify
  // their parent of changes.
  Builder builder = new Builder(parent);
  return builder;
}
/**
 * Builder for protobuf type {@code tensorflow.GPUOptions}.
 *
 * <p>Obtain one via {@link org.tensorflow.framework.GPUOptions#newBuilder()},
 * set fields, then call {@link #build()} to produce an immutable message.
 * Not thread-safe.
 */
public static final class Builder extends
    org.nd4j.shade.protobuf.GeneratedMessageV3.Builder<Builder> implements
    // @@protoc_insertion_point(builder_implements:tensorflow.GPUOptions)
    org.tensorflow.framework.GPUOptionsOrBuilder {

  public static final org.nd4j.shade.protobuf.Descriptors.Descriptor
      getDescriptor() {
    return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_GPUOptions_descriptor;
  }

  protected org.nd4j.shade.protobuf.GeneratedMessageV3.FieldAccessorTable
      internalGetFieldAccessorTable() {
    // Maps the descriptor's fields onto this class's accessors for reflective access.
    return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_GPUOptions_fieldAccessorTable
        .ensureFieldAccessorsInitialized(
            org.tensorflow.framework.GPUOptions.class, org.tensorflow.framework.GPUOptions.Builder.class);
  }

  // Construct using org.tensorflow.framework.GPUOptions.newBuilder()
  private Builder() {
    maybeForceBuilderInitialization();
  }

  private Builder(
      org.nd4j.shade.protobuf.GeneratedMessageV3.BuilderParent parent) {
    super(parent);
    maybeForceBuilderInitialization();
  }

  private void maybeForceBuilderInitialization() {
    // No nested builders require eager initialization for this message; the
    // guard is kept for structural parity with other generated builders.
    if (org.nd4j.shade.protobuf.GeneratedMessageV3
        .alwaysUseFieldBuilders) {
    }
  }

  /** Resets every field to its proto3 default value. */
  public Builder clear() {
    super.clear();
    perProcessGpuMemoryFraction_ = 0D;
    allowGrowth_ = false;
    allocatorType_ = "";
    deferredDeletionBytes_ = 0L;
    visibleDeviceList_ = "";
    pollingActiveDelayUsecs_ = 0;
    pollingInactiveDelayMsecs_ = 0;
    forceGpuCompatible_ = false;
    if (experimentalBuilder_ == null) {
      experimental_ = null;
    } else {
      experimental_ = null;
      experimentalBuilder_ = null;
    }
    return this;
  }

  public org.nd4j.shade.protobuf.Descriptors.Descriptor
      getDescriptorForType() {
    return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_GPUOptions_descriptor;
  }

  public org.tensorflow.framework.GPUOptions getDefaultInstanceForType() {
    return org.tensorflow.framework.GPUOptions.getDefaultInstance();
  }

  /** Builds the message, throwing if it is not fully initialized. */
  public org.tensorflow.framework.GPUOptions build() {
    org.tensorflow.framework.GPUOptions result = buildPartial();
    if (!result.isInitialized()) {
      throw newUninitializedMessageException(result);
    }
    return result;
  }

  /** Builds the message without the initialization check. */
  public org.tensorflow.framework.GPUOptions buildPartial() {
    org.tensorflow.framework.GPUOptions result = new org.tensorflow.framework.GPUOptions(this);
    result.perProcessGpuMemoryFraction_ = perProcessGpuMemoryFraction_;
    result.allowGrowth_ = allowGrowth_;
    result.allocatorType_ = allocatorType_;
    result.deferredDeletionBytes_ = deferredDeletionBytes_;
    result.visibleDeviceList_ = visibleDeviceList_;
    result.pollingActiveDelayUsecs_ = pollingActiveDelayUsecs_;
    result.pollingInactiveDelayMsecs_ = pollingInactiveDelayMsecs_;
    result.forceGpuCompatible_ = forceGpuCompatible_;
    if (experimentalBuilder_ == null) {
      result.experimental_ = experimental_;
    } else {
      result.experimental_ = experimentalBuilder_.build();
    }
    onBuilt();
    return result;
  }

  public Builder clone() {
    return (Builder) super.clone();
  }

  public Builder setField(
      org.nd4j.shade.protobuf.Descriptors.FieldDescriptor field,
      java.lang.Object value) {
    return (Builder) super.setField(field, value);
  }

  public Builder clearField(
      org.nd4j.shade.protobuf.Descriptors.FieldDescriptor field) {
    return (Builder) super.clearField(field);
  }

  public Builder clearOneof(
      org.nd4j.shade.protobuf.Descriptors.OneofDescriptor oneof) {
    return (Builder) super.clearOneof(oneof);
  }

  public Builder setRepeatedField(
      org.nd4j.shade.protobuf.Descriptors.FieldDescriptor field,
      int index, java.lang.Object value) {
    return (Builder) super.setRepeatedField(field, index, value);
  }

  public Builder addRepeatedField(
      org.nd4j.shade.protobuf.Descriptors.FieldDescriptor field,
      java.lang.Object value) {
    return (Builder) super.addRepeatedField(field, value);
  }

  public Builder mergeFrom(org.nd4j.shade.protobuf.Message other) {
    // Fast path for the concrete type; otherwise fall back to reflective merge.
    if (other instanceof org.tensorflow.framework.GPUOptions) {
      return mergeFrom((org.tensorflow.framework.GPUOptions)other);
    } else {
      super.mergeFrom(other);
      return this;
    }
  }

  /**
   * Merges every non-default field of {@code other} into this builder
   * (standard proto3 merge semantics: default-valued scalars are skipped,
   * the nested {@code experimental} message is merged recursively).
   */
  public Builder mergeFrom(org.tensorflow.framework.GPUOptions other) {
    if (other == org.tensorflow.framework.GPUOptions.getDefaultInstance()) return this;
    if (other.getPerProcessGpuMemoryFraction() != 0D) {
      setPerProcessGpuMemoryFraction(other.getPerProcessGpuMemoryFraction());
    }
    if (other.getAllowGrowth() != false) {
      setAllowGrowth(other.getAllowGrowth());
    }
    if (!other.getAllocatorType().isEmpty()) {
      allocatorType_ = other.allocatorType_;
      onChanged();
    }
    if (other.getDeferredDeletionBytes() != 0L) {
      setDeferredDeletionBytes(other.getDeferredDeletionBytes());
    }
    if (!other.getVisibleDeviceList().isEmpty()) {
      visibleDeviceList_ = other.visibleDeviceList_;
      onChanged();
    }
    if (other.getPollingActiveDelayUsecs() != 0) {
      setPollingActiveDelayUsecs(other.getPollingActiveDelayUsecs());
    }
    if (other.getPollingInactiveDelayMsecs() != 0) {
      setPollingInactiveDelayMsecs(other.getPollingInactiveDelayMsecs());
    }
    if (other.getForceGpuCompatible() != false) {
      setForceGpuCompatible(other.getForceGpuCompatible());
    }
    if (other.hasExperimental()) {
      mergeExperimental(other.getExperimental());
    }
    this.mergeUnknownFields(other.unknownFields);
    onChanged();
    return this;
  }

  public final boolean isInitialized() {
    // proto3 message with no required fields: always initialized.
    return true;
  }

  /**
   * Parses from {@code input} and merges the result into this builder.
   * On a parse failure, fields read before the error are still merged
   * (see the finally block) before the exception is rethrown.
   */
  public Builder mergeFrom(
      org.nd4j.shade.protobuf.CodedInputStream input,
      org.nd4j.shade.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    org.tensorflow.framework.GPUOptions parsedMessage = null;
    try {
      parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
    } catch (org.nd4j.shade.protobuf.InvalidProtocolBufferException e) {
      parsedMessage = (org.tensorflow.framework.GPUOptions) e.getUnfinishedMessage();
      throw e.unwrapIOException();
    } finally {
      if (parsedMessage != null) {
        mergeFrom(parsedMessage);
      }
    }
    return this;
  }

  // Field 1: fraction of available GPU memory to allocate per process
  // (values > 1.0 enable CUDA unified-memory oversubscription; see config.proto).
  private double perProcessGpuMemoryFraction_ ;

  /**
   * <pre>
   * The type of GPU allocation strategy to use.
   * Allowed values:
   * "": The empty string (default) uses a system-chosen default
   *     which may change over time.
   * "BFC": A "Best-fit with coalescing" algorithm, simplified from a
   *     version of dlmalloc.
   * </pre>
   *
   * <code>string allocator_type = 2;</code>
   */
  public Builder setAllocatorTypeBytes(
      org.nd4j.shade.protobuf.ByteString value) {
    if (value == null) {
      throw new NullPointerException();
    }
    checkByteStringIsUtf8(value);
    allocatorType_ = value;
    onChanged();
    return this;
  }

  private long deferredDeletionBytes_ ;
  /**
   * <pre>
   * Delay deletion of up to this many bytes to reduce the number of
   * interactions with gpu driver code. If 0, the system chooses
   * a reasonable default (several MBs).
   * </pre>
   *
   * <code>int64 deferred_deletion_bytes = 3;</code>
   */
  public long getDeferredDeletionBytes() {
    return deferredDeletionBytes_;
  }

  /**
   * <pre>
   * A comma-separated list of GPU ids that determines the 'visible'
   * to 'virtual' mapping of GPU devices. For example, if TensorFlow
   * can see 8 GPU devices in the process, and one wanted to map
   * visible GPU devices 5 and 3 as "/device:GPU:0", and "/device:GPU:1",
   * then one would specify this field as "5,3". This field is similar in
   * spirit to the CUDA_VISIBLE_DEVICES environment variable, except
   * it applies to the visible GPU devices in the process.
   * NOTE:
   * 1. The GPU driver provides the process with the visible GPUs
   *    in an order which is not guaranteed to have any correlation to
   *    the *physical* GPU id in the machine. This field is used for
   *    remapping "visible" to "virtual", which means this operates only
   *    after the process starts. Users are required to use vendor
   *    specific mechanisms (e.g., CUDA_VISIBLE_DEVICES) to control the
   *    physical to visible device mapping prior to invoking TensorFlow.
   * 2. In the code, the ids in this list are also called "CUDA GPU id"s,
   *    and the 'virtual' ids of GPU devices (i.e. the ids in the device
   *    name "/device:GPU:&lt;id&gt;") are also called "TF GPU id"s. Please
   *    refer to third_party/tensorflow/core/common_runtime/gpu/gpu_id.h
   *    for more information.
   * </pre>
   *
   * <code>string visible_device_list = 5;</code>
   */
  public Builder setVisibleDeviceListBytes(
      org.nd4j.shade.protobuf.ByteString value) {
    if (value == null) {
      throw new NullPointerException();
    }
    checkByteStringIsUtf8(value);
    visibleDeviceList_ = value;
    onChanged();
    return this;
  }

  private int pollingActiveDelayUsecs_ ;
  /**
   * <pre>
   * In the event polling loop sleep this many microseconds between
   * PollEvents calls, when the queue is not empty. If value is not
   * set or set to 0, gets set to a non-zero default.
   * </pre>
   *
   * <code>int32 polling_active_delay_usecs = 6;</code>
   */
  public int getPollingActiveDelayUsecs() {
    return pollingActiveDelayUsecs_;
  }

  /**
   * <pre>
   * Everything inside experimental is subject to change and is not subject
   * to API stability guarantees in
   * https://www.tensorflow.org/guide/version_compat.
   * </pre>
   *
   * <code>.tensorflow.GPUOptions.Experimental experimental = 9;</code>
   */
  public Builder setExperimental(org.tensorflow.framework.GPUOptions.Experimental value) {
    if (experimentalBuilder_ == null) {
      if (value == null) {
        throw new NullPointerException();
      }
      experimental_ = value;
      onChanged();
    } else {
      experimentalBuilder_.setMessage(value);
    }
    return this;
  }

  /**
   * Lazily creates the nested-message builder for field 9. Once created, all
   * access goes through the builder, so the plain {@code experimental_}
   * reference is cleared to keep a single source of truth.
   *
   * <code>.tensorflow.GPUOptions.Experimental experimental = 9;</code>
   */
  private org.nd4j.shade.protobuf.SingleFieldBuilderV3<
      org.tensorflow.framework.GPUOptions.Experimental, org.tensorflow.framework.GPUOptions.Experimental.Builder, org.tensorflow.framework.GPUOptions.ExperimentalOrBuilder>
      getExperimentalFieldBuilder() {
    if (experimentalBuilder_ == null) {
      experimentalBuilder_ = new org.nd4j.shade.protobuf.SingleFieldBuilderV3<
          org.tensorflow.framework.GPUOptions.Experimental, org.tensorflow.framework.GPUOptions.Experimental.Builder, org.tensorflow.framework.GPUOptions.ExperimentalOrBuilder>(
              getExperimental(),
              getParentForChildren(),
              isClean());
      experimental_ = null;
    }
    return experimentalBuilder_;
  }

  public final Builder setUnknownFields(
      final org.nd4j.shade.protobuf.UnknownFieldSet unknownFields) {
    return super.setUnknownFieldsProto3(unknownFields);
  }

  public final Builder mergeUnknownFields(
      final org.nd4j.shade.protobuf.UnknownFieldSet unknownFields) {
    return super.mergeUnknownFields(unknownFields);
  }

  // @@protoc_insertion_point(builder_scope:tensorflow.GPUOptions)
}
// @@protoc_insertion_point(class_scope:tensorflow.GPUOptions)
// Singleton default instance: all fields at proto3 defaults. Created eagerly
// in a static initializer so getDefaultInstance() is allocation-free.
private static final org.tensorflow.framework.GPUOptions DEFAULT_INSTANCE;
static {
  DEFAULT_INSTANCE = new org.tensorflow.framework.GPUOptions();
}
/** Returns the shared immutable default instance of {@code GPUOptions}. */
public static org.tensorflow.framework.GPUOptions getDefaultInstance() {
  return DEFAULT_INSTANCE;
}
// Singleton parser for GPUOptions. Declared with its type argument (the raw
// Parser/AbstractParser here appears to be an extraction artifact; protoc
// emits Parser<GPUOptions>).
private static final org.nd4j.shade.protobuf.Parser<GPUOptions>
    PARSER = new org.nd4j.shade.protobuf.AbstractParser<GPUOptions>() {
  public GPUOptions parsePartialFrom(
      org.nd4j.shade.protobuf.CodedInputStream input,
      org.nd4j.shade.protobuf.ExtensionRegistryLite extensionRegistry)
      throws org.nd4j.shade.protobuf.InvalidProtocolBufferException {
    // Delegates to the private parsing constructor, which reads the stream.
    return new GPUOptions(input, extensionRegistry);
  }
};
/**
 * Returns the singleton parser for {@code GPUOptions}. Typed as
 * {@code Parser<GPUOptions>} (the raw {@code Parser} in the original appears
 * to be an extraction artifact; protoc emits the parameterized type).
 */
public static org.nd4j.shade.protobuf.Parser<GPUOptions> parser() {
  return PARSER;
}
/** Instance-level accessor for the singleton parser (typed, not raw). */
@java.lang.Override
public org.nd4j.shade.protobuf.Parser<GPUOptions> getParserForType() {
  return PARSER;
}
/** Instance-level accessor for the shared default instance. */
public org.tensorflow.framework.GPUOptions getDefaultInstanceForType() {
  return DEFAULT_INSTANCE;
}
}