// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: tensorflow/core/protobuf/config.proto

package org.tensorflow.framework;

/**
 * Protobuf type {@code tensorflow.GPUOptions}
 */
public  final class GPUOptions extends
    com.google.protobuf.GeneratedMessageV3 implements
    // @@protoc_insertion_point(message_implements:tensorflow.GPUOptions)
    GPUOptionsOrBuilder {
private static final long serialVersionUID = 0L;
  // Use GPUOptions.newBuilder() to construct.
  private GPUOptions(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
    super(builder);
  }
  private GPUOptions() {
    perProcessGpuMemoryFraction_ = 0D;
    allowGrowth_ = false;
    allocatorType_ = "";
    deferredDeletionBytes_ = 0L;
    visibleDeviceList_ = "";
    pollingActiveDelayUsecs_ = 0;
    pollingInactiveDelayMsecs_ = 0;
    forceGpuCompatible_ = false;
  }
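
  // Usage sketch (illustrative, not generated code): the no-arg constructor
  // above leaves every proto3 field at its zero value, which is what
  // getDefaultInstance() exposes:
  //
  //   GPUOptions defaults = GPUOptions.getDefaultInstance();
  //   assert defaults.getPerProcessGpuMemoryFraction() == 0.0;
  //   assert !defaults.getAllowGrowth();
  //   assert defaults.getAllocatorType().isEmpty();
  //   assert defaults.getVisibleDeviceList().isEmpty();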

  @java.lang.Override
  public final com.google.protobuf.UnknownFieldSet
  getUnknownFields() {
    return this.unknownFields;
  }
  private GPUOptions(
      com.google.protobuf.CodedInputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    this();
    if (extensionRegistry == null) {
      throw new java.lang.NullPointerException();
    }
    int mutable_bitField0_ = 0;
    com.google.protobuf.UnknownFieldSet.Builder unknownFields =
        com.google.protobuf.UnknownFieldSet.newBuilder();
    try {
      boolean done = false;
      while (!done) {
        int tag = input.readTag();
        switch (tag) {
          case 0:
            done = true;
            break;
          default: {
            if (!parseUnknownFieldProto3(
                input, unknownFields, extensionRegistry, tag)) {
              done = true;
            }
            break;
          }
          case 9: {

            perProcessGpuMemoryFraction_ = input.readDouble();
            break;
          }
          case 18: {
            java.lang.String s = input.readStringRequireUtf8();

            allocatorType_ = s;
            break;
          }
          case 24: {

            deferredDeletionBytes_ = input.readInt64();
            break;
          }
          case 32: {

            allowGrowth_ = input.readBool();
            break;
          }
          case 42: {
            java.lang.String s = input.readStringRequireUtf8();

            visibleDeviceList_ = s;
            break;
          }
          case 48: {

            pollingActiveDelayUsecs_ = input.readInt32();
            break;
          }
          case 56: {

            pollingInactiveDelayMsecs_ = input.readInt32();
            break;
          }
          case 64: {

            forceGpuCompatible_ = input.readBool();
            break;
          }
          case 74: {
            org.tensorflow.framework.GPUOptions.Experimental.Builder subBuilder = null;
            if (experimental_ != null) {
              subBuilder = experimental_.toBuilder();
            }
            experimental_ = input.readMessage(org.tensorflow.framework.GPUOptions.Experimental.parser(), extensionRegistry);
            if (subBuilder != null) {
              subBuilder.mergeFrom(experimental_);
              experimental_ = subBuilder.buildPartial();
            }

            break;
          }
        }
      }
    } catch (com.google.protobuf.InvalidProtocolBufferException e) {
      throw e.setUnfinishedMessage(this);
    } catch (java.io.IOException e) {
      throw new com.google.protobuf.InvalidProtocolBufferException(
          e).setUnfinishedMessage(this);
    } finally {
      this.unknownFields = unknownFields.build();
      makeExtensionsImmutable();
    }
  }
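
  // Usage sketch (illustrative, not generated code): this parsing constructor
  // backs the generated parseFrom(...) overloads, so a GPUOptions message
  // round-trips through its wire form like any protobuf message:
  //
  //   GPUOptions opts = GPUOptions.newBuilder().setAllowGrowth(true).build();
  //   byte[] wire = opts.toByteArray();
  //   GPUOptions parsed = GPUOptions.parseFrom(wire);  // dispatches on tags 9, 18, 24, ...
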
  public static final com.google.protobuf.Descriptors.Descriptor
      getDescriptor() {
    return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_GPUOptions_descriptor;
  }

  protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
      internalGetFieldAccessorTable() {
    return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_GPUOptions_fieldAccessorTable
        .ensureFieldAccessorsInitialized(
            org.tensorflow.framework.GPUOptions.class, org.tensorflow.framework.GPUOptions.Builder.class);
  }
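
  // Usage sketch (illustrative, not generated code): a typical GPU
  // configuration assembled with the generated Builder; the values are
  // arbitrary examples:
  //
  //   GPUOptions gpu = GPUOptions.newBuilder()
  //       .setPerProcessGpuMemoryFraction(0.4)   // cap at ~40% of GPU memory
  //       .setAllowGrowth(true)                  // grow allocations on demand
  //       .setVisibleDeviceList("0,1")           // expose physical GPUs 0 and 1
  //       .build();
  //   ConfigProto config = ConfigProto.newBuilder().setGpuOptions(gpu).build();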

  public interface ExperimentalOrBuilder extends
      // @@protoc_insertion_point(interface_extends:tensorflow.GPUOptions.Experimental)
      com.google.protobuf.MessageOrBuilder {

    /**
     * <pre>
     * The multi virtual device settings. If empty (not set), it will create
     * single virtual device on each visible GPU, according to the settings
     * in "visible_device_list" above. Otherwise, the number of elements in the
     * list must be the same as the number of visible GPUs (after
     * "visible_device_list" filtering if it is set), and the string represented
     * device names (e.g. /device:GPU:<id>) will refer to the virtual
     * devices and have the <id> field assigned sequentially starting from 0,
     * according to the order they appear in this list and the "memory_limit"
     * list inside each element. For example,
     *   visible_device_list = "1,0"
     *   virtual_devices { memory_limit: 1GB memory_limit: 2GB }
     *   virtual_devices {}
     * will create three virtual devices as:
     *   /device:GPU:0 -> visible GPU 1 with 1GB memory
     *   /device:GPU:1 -> visible GPU 1 with 2GB memory
     *   /device:GPU:2 -> visible GPU 0 with all available memory
     * NOTE:
     * 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
     *    at the same time.
     * 2. Currently this setting is per-process, not per-session. Using
     *    different settings in different sessions within same process will
     *    result in undefined behavior.
     * </pre>
     *
     * <code>repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;</code>
     */
    java.util.List<org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices> 
        getVirtualDevicesList();
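
    // Usage sketch (illustrative, not generated code): the configuration from
    // the comment above, expressed with the generated builders:
    //
    //   GPUOptions.Experimental experimental = GPUOptions.Experimental.newBuilder()
    //       .addVirtualDevices(GPUOptions.Experimental.VirtualDevices.newBuilder()
    //           .addMemoryLimitMb(1024)           // /device:GPU:0 on visible GPU 1
    //           .addMemoryLimitMb(2048))          // /device:GPU:1 on visible GPU 1
    //       .addVirtualDevices(GPUOptions.Experimental.VirtualDevices.newBuilder())
    //                                             // /device:GPU:2 on visible GPU 0
    //       .build();
    //   GPUOptions gpu = GPUOptions.newBuilder()
    //       .setVisibleDeviceList("1,0")
    //       .setExperimental(experimental)
    //       .build();
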
    /**
     * <pre>
     * The multi virtual device settings. If empty (not set), it will create
     * single virtual device on each visible GPU, according to the settings
     * in "visible_device_list" above. Otherwise, the number of elements in the
     * list must be the same as the number of visible GPUs (after
     * "visible_device_list" filtering if it is set), and the string represented
     * device names (e.g. /device:GPU:<id>) will refer to the virtual
     * devices and have the <id> field assigned sequentially starting from 0,
     * according to the order they appear in this list and the "memory_limit"
     * list inside each element. For example,
     *   visible_device_list = "1,0"
     *   virtual_devices { memory_limit: 1GB memory_limit: 2GB }
     *   virtual_devices {}
     * will create three virtual devices as:
     *   /device:GPU:0 -> visible GPU 1 with 1GB memory
     *   /device:GPU:1 -> visible GPU 1 with 2GB memory
     *   /device:GPU:2 -> visible GPU 0 with all available memory
     * NOTE:
     * 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
     *    at the same time.
     * 2. Currently this setting is per-process, not per-session. Using
     *    different settings in different sessions within same process will
     *    result in undefined behavior.
     * </pre>
     *
     * <code>repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;</code>
     */
    org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices getVirtualDevices(int index);
    /**
     * <pre>
     * The multi virtual device settings. If empty (not set), it will create
     * single virtual device on each visible GPU, according to the settings
     * in "visible_device_list" above. Otherwise, the number of elements in the
     * list must be the same as the number of visible GPUs (after
     * "visible_device_list" filtering if it is set), and the string represented
     * device names (e.g. /device:GPU:<id>) will refer to the virtual
     * devices and have the <id> field assigned sequentially starting from 0,
     * according to the order they appear in this list and the "memory_limit"
     * list inside each element. For example,
     *   visible_device_list = "1,0"
     *   virtual_devices { memory_limit: 1GB memory_limit: 2GB }
     *   virtual_devices {}
     * will create three virtual devices as:
     *   /device:GPU:0 -> visible GPU 1 with 1GB memory
     *   /device:GPU:1 -> visible GPU 1 with 2GB memory
     *   /device:GPU:2 -> visible GPU 0 with all available memory
     * NOTE:
     * 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
     *    at the same time.
     * 2. Currently this setting is per-process, not per-session. Using
     *    different settings in different sessions within same process will
     *    result in undefined behavior.
     * </pre>
     *
     * <code>repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;</code>
     */
    int getVirtualDevicesCount();
    /**
     * <pre>
     * The multi virtual device settings. If empty (not set), it will create
     * single virtual device on each visible GPU, according to the settings
     * in "visible_device_list" above. Otherwise, the number of elements in the
     * list must be the same as the number of visible GPUs (after
     * "visible_device_list" filtering if it is set), and the string represented
     * device names (e.g. /device:GPU:<id>) will refer to the virtual
     * devices and have the <id> field assigned sequentially starting from 0,
     * according to the order they appear in this list and the "memory_limit"
     * list inside each element. For example,
     *   visible_device_list = "1,0"
     *   virtual_devices { memory_limit: 1GB memory_limit: 2GB }
     *   virtual_devices {}
     * will create three virtual devices as:
     *   /device:GPU:0 -> visible GPU 1 with 1GB memory
     *   /device:GPU:1 -> visible GPU 1 with 2GB memory
     *   /device:GPU:2 -> visible GPU 0 with all available memory
     * NOTE:
     * 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
     *    at the same time.
     * 2. Currently this setting is per-process, not per-session. Using
     *    different settings in different sessions within same process will
     *    result in undefined behavior.
     * </pre>
     *
     * <code>repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;</code>
     */
    java.util.List<? extends org.tensorflow.framework.GPUOptions.Experimental.VirtualDevicesOrBuilder> 
        getVirtualDevicesOrBuilderList();
    /**
     * <pre>
     * The multi virtual device settings. If empty (not set), it will create
     * single virtual device on each visible GPU, according to the settings
     * in "visible_device_list" above. Otherwise, the number of elements in the
     * list must be the same as the number of visible GPUs (after
     * "visible_device_list" filtering if it is set), and the string represented
     * device names (e.g. /device:GPU:<id>) will refer to the virtual
     * devices and have the <id> field assigned sequentially starting from 0,
     * according to the order they appear in this list and the "memory_limit"
     * list inside each element. For example,
     *   visible_device_list = "1,0"
     *   virtual_devices { memory_limit: 1GB memory_limit: 2GB }
     *   virtual_devices {}
     * will create three virtual devices as:
     *   /device:GPU:0 -> visible GPU 1 with 1GB memory
     *   /device:GPU:1 -> visible GPU 1 with 2GB memory
     *   /device:GPU:2 -> visible GPU 0 with all available memory
     * NOTE:
     * 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
     *    at the same time.
     * 2. Currently this setting is per-process, not per-session. Using
     *    different settings in different sessions within same process will
     *    result in undefined behavior.
     * </pre>
     *
     * <code>repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;</code>
     */
    org.tensorflow.framework.GPUOptions.Experimental.VirtualDevicesOrBuilder getVirtualDevicesOrBuilder(
        int index);
    /**
     * <pre>
     * If true, uses CUDA unified memory for memory allocations. If
     * per_process_gpu_memory_fraction option is greater than 1.0, then unified
     * memory is used regardless of the value for this field. See comments for
     * per_process_gpu_memory_fraction field for more details and requirements
     * of the unified memory. This option is useful to oversubscribe memory if
     * multiple processes are sharing a single GPU while individually using less
     * than 1.0 per process memory fraction.
     * </pre>
     *
     * <code>bool use_unified_memory = 2;</code>
     */
    boolean getUseUnifiedMemory();
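
    // Usage sketch (illustrative, not generated code): oversubscribing a
    // shared GPU with unified memory, per the comment above:
    //
    //   GPUOptions gpu = GPUOptions.newBuilder()
    //       .setPerProcessGpuMemoryFraction(2.0)  // > 1.0 implies unified memory
    //       .setExperimental(GPUOptions.Experimental.newBuilder()
    //           .setUseUnifiedMemory(true))
    //       .build();
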
    /**
     * <pre>
     * If > 1, the number of device-to-device copy streams to create
     * for each GPUDevice.  Default value is 0, which is automatically
     * converted to 1.
     * </pre>
     *
     * <code>int32 num_dev_to_dev_copy_streams = 3;</code>
     */
    int getNumDevToDevCopyStreams();
    /**
     * <pre>
     * If non-empty, defines a good GPU ring order on a single worker based on
     * device interconnect.  This assumes that all workers have the same GPU
     * topology.  Specify as a comma-separated string, e.g. "3,2,1,0,7,6,5,4".
     * This ring order is used by the RingReducer implementation of
     * CollectiveReduce, and serves as an override to automatic ring order
     * generation in OrderTaskDeviceMap() during CollectiveParam resolution.
     * </pre>
     *
     * <code>string collective_ring_order = 4;</code>
     */
    java.lang.String getCollectiveRingOrder();
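
    // Usage sketch (illustrative, not generated code): pinning the collective
    // ring order to the example string from the comment above:
    //
    //   GPUOptions.Experimental experimental = GPUOptions.Experimental.newBuilder()
    //       .setCollectiveRingOrder("3,2,1,0,7,6,5,4")
    //       .build();
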
    /**
     * <pre>
     * If non-empty, defines a good GPU ring order on a single worker based on
     * device interconnect.  This assumes that all workers have the same GPU
     * topology.  Specify as a comma-separated string, e.g. "3,2,1,0,7,6,5,4".
     * This ring order is used by the RingReducer implementation of
     * CollectiveReduce, and serves as an override to automatic ring order
     * generation in OrderTaskDeviceMap() during CollectiveParam resolution.
     * </pre>
     *
     * <code>string collective_ring_order = 4;</code>
     */
    com.google.protobuf.ByteString
        getCollectiveRingOrderBytes();
    /**
     * <pre>
     * If true then extra work is done by GPUDevice and GPUBFCAllocator to
     * keep track of when GPU memory is freed and when kernels actually
     * complete so that we can know when a nominally free memory chunk
     * is really not subject to pending use.
     * </pre>
     *
     * <code>bool timestamped_allocator = 5;</code>
     */
    boolean getTimestampedAllocator();
    /**
     * <pre>
     * Parameters for GPUKernelTracker.  By default no kernel tracking is done.
     * Note that timestamped_allocator is only effective if some tracking is
     * specified.
     * If kernel_tracker_max_interval = n > 0, then a tracking event
     * is inserted after every n kernels without an event.
     * </pre>
     *
     * <code>int32 kernel_tracker_max_interval = 7;</code>
     */
    int getKernelTrackerMaxInterval();
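
    // Usage sketch (illustrative, not generated code): enabling the
    // timestamped allocator together with this interval knob and the
    // max_bytes / max_pending knobs declared just below; values are arbitrary:
    //
    //   GPUOptions.Experimental experimental = GPUOptions.Experimental.newBuilder()
    //       .setTimestampedAllocator(true)        // only effective with tracking on
    //       .setKernelTrackerMaxInterval(8)       // event every 8 kernels
    //       .setKernelTrackerMaxBytes(1 << 20)    // event every ~1 MiB allocated
    //       .setKernelTrackerMaxPending(4)        // at most 4 outstanding events
    //       .build();
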
    /**
     * <pre>
     * If kernel_tracker_max_bytes = n > 0, then a tracking event is
     * inserted after every series of kernels allocating a sum of
     * memory >= n.  If one kernel allocates b * n bytes, then one
     * event will be inserted after it, but it will count as b against
     * the pending limit.
     * </pre>
     *
     * <code>int32 kernel_tracker_max_bytes = 8;</code>
     */
    int getKernelTrackerMaxBytes();
    /**
     * <pre>
     * If kernel_tracker_max_pending > 0 then no more than this many
     * tracking events can be outstanding at a time.  An attempt to
     * launch an additional kernel will stall until an event
     * completes.
     * </pre>
     *
     * <code>int32 kernel_tracker_max_pending = 9;</code>
     */
    int getKernelTrackerMaxPending();
  }
  /**
   * Protobuf type {@code tensorflow.GPUOptions.Experimental}
   */
  public static final class Experimental extends
      com.google.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:tensorflow.GPUOptions.Experimental)
      ExperimentalOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use Experimental.newBuilder() to construct.
    private Experimental(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private Experimental() {
      virtualDevices_ = java.util.Collections.emptyList();
      useUnifiedMemory_ = false;
      numDevToDevCopyStreams_ = 0;
      collectiveRingOrder_ = "";
      timestampedAllocator_ = false;
      kernelTrackerMaxInterval_ = 0;
      kernelTrackerMaxBytes_ = 0;
      kernelTrackerMaxPending_ = 0;
    }

    @java.lang.Override
    public final com.google.protobuf.UnknownFieldSet
    getUnknownFields() {
      return this.unknownFields;
    }
    private Experimental(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      this();
      if (extensionRegistry == null) {
        throw new java.lang.NullPointerException();
      }
      int mutable_bitField0_ = 0;
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            default: {
              if (!parseUnknownFieldProto3(
                  input, unknownFields, extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
            case 10: {
              if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
                virtualDevices_ = new java.util.ArrayList<org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices>();
                mutable_bitField0_ |= 0x00000001;
              }
              virtualDevices_.add(
                  input.readMessage(org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices.parser(), extensionRegistry));
              break;
            }
            case 16: {
              useUnifiedMemory_ = input.readBool();
              break;
            }
            case 24: {
              numDevToDevCopyStreams_ = input.readInt32();
              break;
            }
            case 34: {
              java.lang.String s = input.readStringRequireUtf8();
              collectiveRingOrder_ = s;
              break;
            }
            case 40: {
              timestampedAllocator_ = input.readBool();
              break;
            }
            case 56: {
              kernelTrackerMaxInterval_ = input.readInt32();
              break;
            }
            case 64: {
              kernelTrackerMaxBytes_ = input.readInt32();
              break;
            }
            case 72: {
              kernelTrackerMaxPending_ = input.readInt32();
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e).setUnfinishedMessage(this);
      } finally {
        if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
          virtualDevices_ = java.util.Collections.unmodifiableList(virtualDevices_);
        }
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_GPUOptions_Experimental_descriptor;
    }

    protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_GPUOptions_Experimental_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.tensorflow.framework.GPUOptions.Experimental.class, org.tensorflow.framework.GPUOptions.Experimental.Builder.class);
    }

    public interface VirtualDevicesOrBuilder extends
        // @@protoc_insertion_point(interface_extends:tensorflow.GPUOptions.Experimental.VirtualDevices)
        com.google.protobuf.MessageOrBuilder {

      /**
       * <pre>
       * Per "virtual" device memory limit, in MB. The number of elements in
       * the list is the number of virtual devices to create on the
       * corresponding visible GPU (see "virtual_devices" below).
       * If empty, it will create single virtual device taking all available
       * memory from the device.
       * For the concept of "visible" and "virtual" GPU, see the comments for
       * "visible_device_list" above for more information.
       * </pre>
       *
       * <code>repeated float memory_limit_mb = 1;</code>
       */
      java.util.List<java.lang.Float> getMemoryLimitMbList();
      /**
       * <pre>
       * Per "virtual" device memory limit, in MB. The number of elements in
       * the list is the number of virtual devices to create on the
       * corresponding visible GPU (see "virtual_devices" below).
       * If empty, it will create single virtual device taking all available
       * memory from the device.
       * For the concept of "visible" and "virtual" GPU, see the comments for
       * "visible_device_list" above for more information.
       * </pre>
       *
       * <code>repeated float memory_limit_mb = 1;</code>
       */
      int getMemoryLimitMbCount();
      /**
       * <pre>
       * Per "virtual" device memory limit, in MB. The number of elements in
       * the list is the number of virtual devices to create on the
       * corresponding visible GPU (see "virtual_devices" below).
       * If empty, it will create single virtual device taking all available
       * memory from the device.
       * For the concept of "visible" and "virtual" GPU, see the comments for
       * "visible_device_list" above for more information.
       * </pre>
       *
       * <code>repeated float memory_limit_mb = 1;</code>
       */
      float getMemoryLimitMb(int index);
    }
    /**
     * <pre>
     * Configuration for breaking down a visible GPU into multiple "virtual"
     * devices.
     * </pre>
     *
     * Protobuf type {@code tensorflow.GPUOptions.Experimental.VirtualDevices}
     */
    public static final class VirtualDevices extends
        com.google.protobuf.GeneratedMessageV3 implements
        // @@protoc_insertion_point(message_implements:tensorflow.GPUOptions.Experimental.VirtualDevices)
        VirtualDevicesOrBuilder {
      private static final long serialVersionUID = 0L;
      // Use VirtualDevices.newBuilder() to construct.
      private VirtualDevices(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
        super(builder);
      }
      private VirtualDevices() {
        memoryLimitMb_ = java.util.Collections.emptyList();
      }

      @java.lang.Override
      public final com.google.protobuf.UnknownFieldSet
      getUnknownFields() {
        return this.unknownFields;
      }
      private VirtualDevices(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        this();
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        int mutable_bitField0_ = 0;
        com.google.protobuf.UnknownFieldSet.Builder unknownFields =
            com.google.protobuf.UnknownFieldSet.newBuilder();
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              default: {
                if (!parseUnknownFieldProto3(
                    input, unknownFields, extensionRegistry, tag)) {
                  done = true;
                }
                break;
              }
              case 13: {
                if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
                  memoryLimitMb_ = new java.util.ArrayList<java.lang.Float>();
                  mutable_bitField0_ |= 0x00000001;
                }
                memoryLimitMb_.add(input.readFloat());
                break;
              }
              case 10: {
                int length = input.readRawVarint32();
                int limit = input.pushLimit(length);
                if (!((mutable_bitField0_ & 0x00000001) == 0x00000001) && input.getBytesUntilLimit() > 0) {
                  memoryLimitMb_ = new java.util.ArrayList<java.lang.Float>();
                  mutable_bitField0_ |= 0x00000001;
                }
                while (input.getBytesUntilLimit() > 0) {
                  memoryLimitMb_.add(input.readFloat());
                }
                input.popLimit(limit);
                break;
              }
            }
          }
        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(this);
        } catch (java.io.IOException e) {
          throw new com.google.protobuf.InvalidProtocolBufferException(
              e).setUnfinishedMessage(this);
        } finally {
          if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
            memoryLimitMb_ = java.util.Collections.unmodifiableList(memoryLimitMb_);
          }
          this.unknownFields = unknownFields.build();
          makeExtensionsImmutable();
        }
      }
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_GPUOptions_Experimental_VirtualDevices_descriptor;
      }

      protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_GPUOptions_Experimental_VirtualDevices_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices.class, org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices.Builder.class);
      }

      public static final int MEMORY_LIMIT_MB_FIELD_NUMBER = 1;
      private java.util.List<java.lang.Float> memoryLimitMb_;
      /**
       * <pre>
       * Per "virtual" device memory limit, in MB. The number of elements in
       * the list is the number of virtual devices to create on the
       * corresponding visible GPU (see "virtual_devices" below).
       * If empty, it will create single virtual device taking all available
       * memory from the device.
       * For the concept of "visible" and "virtual" GPU, see the comments for
       * "visible_device_list" above for more information.
       * </pre>
       *
       * <code>repeated float memory_limit_mb = 1;</code>
       */
      public java.util.List<java.lang.Float>
          getMemoryLimitMbList() {
        return memoryLimitMb_;
      }
      /**
       * <pre>
       * Per "virtual" device memory limit, in MB. The number of elements in
       * the list is the number of virtual devices to create on the
       * corresponding visible GPU (see "virtual_devices" below).
       * If empty, it will create single virtual device taking all available
       * memory from the device.
       * For the concept of "visible" and "virtual" GPU, see the comments for
       * "visible_device_list" above for more information.
       * </pre>
       *
       * <code>repeated float memory_limit_mb = 1;</code>
       */
      public int getMemoryLimitMbCount() {
        return memoryLimitMb_.size();
      }
      /**
       * <pre>
       * Per "virtual" device memory limit, in MB. The number of elements in
       * the list is the number of virtual devices to create on the
       * corresponding visible GPU (see "virtual_devices" below).
       * If empty, it will create single virtual device taking all available
       * memory from the device.
       * For the concept of "visible" and "virtual" GPU, see the comments for
       * "visible_device_list" above for more information.
       * </pre>
       *
       * <code>repeated float memory_limit_mb = 1;</code>
       */
      public float getMemoryLimitMb(int index) {
        return memoryLimitMb_.get(index);
      }
      private int memoryLimitMbMemoizedSerializedSize = -1;

      private byte memoizedIsInitialized = -1;
      public final boolean isInitialized() {
        byte isInitialized = memoizedIsInitialized;
        if (isInitialized == 1) return true;
        if (isInitialized == 0) return false;

        memoizedIsInitialized = 1;
        return true;
      }

      public void writeTo(com.google.protobuf.CodedOutputStream output)
                          throws java.io.IOException {
        getSerializedSize();
        if (getMemoryLimitMbList().size() > 0) {
          output.writeUInt32NoTag(10);
          output.writeUInt32NoTag(memoryLimitMbMemoizedSerializedSize);
        }
        for (int i = 0; i < memoryLimitMb_.size(); i++) {
          output.writeFloatNoTag(memoryLimitMb_.get(i));
        }
        unknownFields.writeTo(output);
      }

      public int getSerializedSize() {
        int size = memoizedSize;
        if (size != -1) return size;

        size = 0;
        {
          int dataSize = 0;
          dataSize = 4 * getMemoryLimitMbList().size();
          size += dataSize;
          if (!getMemoryLimitMbList().isEmpty()) {
            size += 1;
            size += com.google.protobuf.CodedOutputStream
                .computeInt32SizeNoTag(dataSize);
          }
          memoryLimitMbMemoizedSerializedSize = dataSize;
        }
        size += unknownFields.getSerializedSize();
        memoizedSize = size;
        return size;
      }

      @java.lang.Override
      public boolean equals(final java.lang.Object obj) {
        if (obj == this) {
          return true;
        }
        if (!(obj instanceof org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices)) {
          return super.equals(obj);
        }
        org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices other = (org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices) obj;

        boolean result = true;
        result = result && getMemoryLimitMbList()
            .equals(other.getMemoryLimitMbList());
        result = result && unknownFields.equals(other.unknownFields);
        return result;
      }

      @java.lang.Override
      public int hashCode() {
        if (memoizedHashCode != 0) {
          return memoizedHashCode;
        }
        int hash = 41;
        hash = (19 * hash) + getDescriptor().hashCode();
        if (getMemoryLimitMbCount() > 0) {
          hash = (37 * hash) + MEMORY_LIMIT_MB_FIELD_NUMBER;
          hash = (53 * hash) + getMemoryLimitMbList().hashCode();
        }
        hash = (29 * hash) + unknownFields.hashCode();
        memoizedHashCode = hash;
        return hash;
      }

      public static org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices parseFrom(
          java.nio.ByteBuffer data)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices parseFrom(
          java.nio.ByteBuffer data,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices parseFrom(
          com.google.protobuf.ByteString data)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices parseFrom(
          com.google.protobuf.ByteString data,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices parseFrom(byte[] data)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices parseFrom(
          byte[] data,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices parseFrom(java.io.InputStream input)
          throws java.io.IOException {
        return com.google.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input);
      }
      public static org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices parseFrom(
          java.io.InputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return com.google.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input, extensionRegistry);
      }
      public static org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices parseDelimitedFrom(java.io.InputStream input)
          throws java.io.IOException {
        return com.google.protobuf.GeneratedMessageV3
            .parseDelimitedWithIOException(PARSER, input);
      }
      public static org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices parseDelimitedFrom(
          java.io.InputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return com.google.protobuf.GeneratedMessageV3
            .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
      }
      public static org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices parseFrom(
          com.google.protobuf.CodedInputStream input)
          throws java.io.IOException {
        return com.google.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input);
      }
      public static org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices parseFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return com.google.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input, extensionRegistry);
      }

      public Builder newBuilderForType() { return newBuilder(); }
      public static Builder newBuilder() {
        return DEFAULT_INSTANCE.toBuilder();
      }
      public static Builder newBuilder(org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices prototype) {
        return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
      }
      public Builder toBuilder() {
        return this == DEFAULT_INSTANCE
            ? new Builder() : new Builder().mergeFrom(this);
      }

      @java.lang.Override
      protected Builder newBuilderForType(
          com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
        Builder builder = new Builder(parent);
        return builder;
      }
      /**
       * <pre>
       * Configuration for breaking down a visible GPU into multiple "virtual"
       * devices.
       * </pre>
       *
       * Protobuf type {@code tensorflow.GPUOptions.Experimental.VirtualDevices}
       */
      public static final class Builder extends
          com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
          // @@protoc_insertion_point(builder_implements:tensorflow.GPUOptions.Experimental.VirtualDevices)
          org.tensorflow.framework.GPUOptions.Experimental.VirtualDevicesOrBuilder {
        public static final com.google.protobuf.Descriptors.Descriptor
            getDescriptor() {
          return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_GPUOptions_Experimental_VirtualDevices_descriptor;
        }

        protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
            internalGetFieldAccessorTable() {
          return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_GPUOptions_Experimental_VirtualDevices_fieldAccessorTable
              .ensureFieldAccessorsInitialized(
                  org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices.class, org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices.Builder.class);
        }

        // Construct using org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices.newBuilder()
        private Builder() {
          maybeForceBuilderInitialization();
        }

        private Builder(
            com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
          super(parent);
          maybeForceBuilderInitialization();
        }
        private void maybeForceBuilderInitialization() {
          if (com.google.protobuf.GeneratedMessageV3
                  .alwaysUseFieldBuilders) {
          }
        }
        public Builder clear() {
          super.clear();
          memoryLimitMb_ = java.util.Collections.emptyList();
          bitField0_ = (bitField0_ & ~0x00000001);
          return this;
        }

        public com.google.protobuf.Descriptors.Descriptor
            getDescriptorForType() {
          return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_GPUOptions_Experimental_VirtualDevices_descriptor;
        }

        public org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices getDefaultInstanceForType() {
          return org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices.getDefaultInstance();
        }

        public org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices build() {
          org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices result = buildPartial();
          if (!result.isInitialized()) {
            throw newUninitializedMessageException(result);
          }
          return result;
        }

        public org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices buildPartial() {
          org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices result = new org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices(this);
          int from_bitField0_ = bitField0_;
          if (((bitField0_ & 0x00000001) == 0x00000001)) {
            memoryLimitMb_ = java.util.Collections.unmodifiableList(memoryLimitMb_);
            bitField0_ = (bitField0_ & ~0x00000001);
          }
          result.memoryLimitMb_ = memoryLimitMb_;
          onBuilt();
          return result;
        }

        public Builder clone() {
          return (Builder) super.clone();
        }
        public Builder setField(
            com.google.protobuf.Descriptors.FieldDescriptor field,
            java.lang.Object value) {
          return (Builder) super.setField(field, value);
        }
        public Builder clearField(
            com.google.protobuf.Descriptors.FieldDescriptor field) {
          return (Builder) super.clearField(field);
        }
        public Builder clearOneof(
            com.google.protobuf.Descriptors.OneofDescriptor oneof) {
          return (Builder) super.clearOneof(oneof);
        }
        public Builder setRepeatedField(
            com.google.protobuf.Descriptors.FieldDescriptor field,
            int index, java.lang.Object value) {
          return (Builder) super.setRepeatedField(field, index, value);
        }
        public Builder addRepeatedField(
            com.google.protobuf.Descriptors.FieldDescriptor field,
            java.lang.Object value) {
          return (Builder) super.addRepeatedField(field, value);
        }
        public Builder mergeFrom(com.google.protobuf.Message other) {
          if (other instanceof org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices) {
            return mergeFrom((org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices)other);
          } else {
            super.mergeFrom(other);
            return this;
          }
        }

        public Builder mergeFrom(org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices other) {
          if (other == org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices.getDefaultInstance()) return this;
          if (!other.memoryLimitMb_.isEmpty()) {
            if (memoryLimitMb_.isEmpty()) {
              memoryLimitMb_ = other.memoryLimitMb_;
              bitField0_ = (bitField0_ & ~0x00000001);
            } else {
              ensureMemoryLimitMbIsMutable();
              memoryLimitMb_.addAll(other.memoryLimitMb_);
            }
            onChanged();
          }
          this.mergeUnknownFields(other.unknownFields);
          onChanged();
          return this;
        }

        public final boolean isInitialized() {
          return true;
        }

        public Builder mergeFrom(
            com.google.protobuf.CodedInputStream input,
            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
            throws java.io.IOException {
          org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices parsedMessage = null;
          try {
            parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
            parsedMessage = (org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices) e.getUnfinishedMessage();
            throw e.unwrapIOException();
          } finally {
            if (parsedMessage != null) {
              mergeFrom(parsedMessage);
            }
          }
          return this;
        }
        private int bitField0_;

        private java.util.List<java.lang.Float> memoryLimitMb_ = java.util.Collections.emptyList();
        private void ensureMemoryLimitMbIsMutable() {
          if (!((bitField0_ & 0x00000001) == 0x00000001)) {
            memoryLimitMb_ = new java.util.ArrayList<java.lang.Float>(memoryLimitMb_);
            bitField0_ |= 0x00000001;
          }
        }
        /**
         * <pre>
         * Per "virtual" device memory limit, in MB. The number of elements in
         * the list is the number of virtual devices to create on the
         * corresponding visible GPU (see "virtual_devices" below).
         * If empty, it will create single virtual device taking all available
         * memory from the device.
         * For the concept of "visible" and "virtual" GPU, see the comments for
         * "visible_device_list" above for more information.
         * </pre>
         *
         * <code>repeated float memory_limit_mb = 1;</code>
         */
        public java.util.List<java.lang.Float>
            getMemoryLimitMbList() {
          return java.util.Collections.unmodifiableList(memoryLimitMb_);
        }
        /**
         * <pre>
         * Per "virtual" device memory limit, in MB. The number of elements in
         * the list is the number of virtual devices to create on the
         * corresponding visible GPU (see "virtual_devices" below).
         * If empty, it will create single virtual device taking all available
         * memory from the device.
         * For the concept of "visible" and "virtual" GPU, see the comments for
         * "visible_device_list" above for more information.
         * </pre>
         *
         * <code>repeated float memory_limit_mb = 1;</code>
         */
        public int getMemoryLimitMbCount() {
          return memoryLimitMb_.size();
        }
        /**
         * <pre>
         * Per "virtual" device memory limit, in MB. The number of elements in
         * the list is the number of virtual devices to create on the
         * corresponding visible GPU (see "virtual_devices" below).
         * If empty, it will create single virtual device taking all available
         * memory from the device.
         * For the concept of "visible" and "virtual" GPU, see the comments for
         * "visible_device_list" above for more information.
         * </pre>
         *
         * <code>repeated float memory_limit_mb = 1;</code>
         */
        public float getMemoryLimitMb(int index) {
          return memoryLimitMb_.get(index);
        }
        /**
         * <pre>
         * Per "virtual" device memory limit, in MB. The number of elements in
         * the list is the number of virtual devices to create on the
         * corresponding visible GPU (see "virtual_devices" below).
         * If empty, it will create single virtual device taking all available
         * memory from the device.
         * For the concept of "visible" and "virtual" GPU, see the comments for
         * "visible_device_list" above for more information.
         * </pre>
         *
         * <code>repeated float memory_limit_mb = 1;</code>
         */
        public Builder setMemoryLimitMb(
            int index, float value) {
          ensureMemoryLimitMbIsMutable();
          memoryLimitMb_.set(index, value);
          onChanged();
          return this;
        }
        /**
         * <pre>
         * Per "virtual" device memory limit, in MB. The number of elements in
         * the list is the number of virtual devices to create on the
         * corresponding visible GPU (see "virtual_devices" below).
         * If empty, it will create single virtual device taking all available
         * memory from the device.
         * For the concept of "visible" and "virtual" GPU, see the comments for
         * "visible_device_list" above for more information.
         * </pre>
         *
         * <code>repeated float memory_limit_mb = 1;</code>
         */
        public Builder addMemoryLimitMb(float value) {
          ensureMemoryLimitMbIsMutable();
          memoryLimitMb_.add(value);
          onChanged();
          return this;
        }
        /**
         * <pre>
         * Per "virtual" device memory limit, in MB. The number of elements in
         * the list is the number of virtual devices to create on the
         * corresponding visible GPU (see "virtual_devices" below).
         * If empty, it will create single virtual device taking all available
         * memory from the device.
         * For the concept of "visible" and "virtual" GPU, see the comments for
         * "visible_device_list" above for more information.
         * </pre>
         *
         * <code>repeated float memory_limit_mb = 1;</code>
         */
        public Builder addAllMemoryLimitMb(
            java.lang.Iterable<? extends java.lang.Float> values) {
          ensureMemoryLimitMbIsMutable();
          com.google.protobuf.AbstractMessageLite.Builder.addAll(
              values, memoryLimitMb_);
          onChanged();
          return this;
        }
        /**
         * <pre>
         * Per "virtual" device memory limit, in MB. The number of elements in
         * the list is the number of virtual devices to create on the
         * corresponding visible GPU (see "virtual_devices" below).
         * If empty, it will create single virtual device taking all available
         * memory from the device.
         * For the concept of "visible" and "virtual" GPU, see the comments for
         * "visible_device_list" above for more information.
         * </pre>
         *
         * <code>repeated float memory_limit_mb = 1;</code>
         */
        public Builder clearMemoryLimitMb() {
          memoryLimitMb_ = java.util.Collections.emptyList();
          bitField0_ = (bitField0_ & ~0x00000001);
          onChanged();
          return this;
        }
        public final Builder setUnknownFields(
            final com.google.protobuf.UnknownFieldSet unknownFields) {
          return super.setUnknownFieldsProto3(unknownFields);
        }

        public final Builder mergeUnknownFields(
            final com.google.protobuf.UnknownFieldSet unknownFields) {
          return super.mergeUnknownFields(unknownFields);
        }


        // @@protoc_insertion_point(builder_scope:tensorflow.GPUOptions.Experimental.VirtualDevices)
      }

      // @@protoc_insertion_point(class_scope:tensorflow.GPUOptions.Experimental.VirtualDevices)
      private static final org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices DEFAULT_INSTANCE;
      static {
        DEFAULT_INSTANCE = new org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices();
      }

      public static org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices getDefaultInstance() {
        return DEFAULT_INSTANCE;
      }

      private static final com.google.protobuf.Parser<VirtualDevices>
          PARSER = new com.google.protobuf.AbstractParser<VirtualDevices>() {
        public VirtualDevices parsePartialFrom(
            com.google.protobuf.CodedInputStream input,
            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
            throws com.google.protobuf.InvalidProtocolBufferException {
          return new VirtualDevices(input, extensionRegistry);
        }
      };

      public static com.google.protobuf.Parser<VirtualDevices> parser() {
        return PARSER;
      }

      @java.lang.Override
      public com.google.protobuf.Parser<VirtualDevices> getParserForType() {
        return PARSER;
      }

      public org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices getDefaultInstanceForType() {
        return DEFAULT_INSTANCE;
      }

    }

    private int bitField0_;
    public static final int VIRTUAL_DEVICES_FIELD_NUMBER = 1;
    private java.util.List<org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices> virtualDevices_;
    /**
     * <pre>
     * The multi virtual device settings. If empty (not set), it will create
     * single virtual device on each visible GPU, according to the settings
     * in "visible_device_list" above. Otherwise, the number of elements in the
     * list must be the same as the number of visible GPUs (after
     * "visible_device_list" filtering if it is set), and the string represented
     * device names (e.g. /device:GPU:<id>) will refer to the virtual
     * devices and have the <id> field assigned sequentially starting from 0,
     * according to the order they appear in this list and the "memory_limit"
     * list inside each element. For example,
     *   visible_device_list = "1,0"
     *   virtual_devices { memory_limit: 1GB memory_limit: 2GB }
     *   virtual_devices {}
     * will create three virtual devices as:
     *   /device:GPU:0 -> visible GPU 1 with 1GB memory
     *   /device:GPU:1 -> visible GPU 1 with 2GB memory
     *   /device:GPU:2 -> visible GPU 0 with all available memory
     * NOTE:
     * 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
     *    at the same time.
     * 2. Currently this setting is per-process, not per-session. Using
     *    different settings in different sessions within same process will
     *    result in undefined behavior.
     * </pre>
     *
     * <code>repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;</code>
     */
    public java.util.List<org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices> getVirtualDevicesList() {
      return virtualDevices_;
    }
    /**
     * <pre>
     * The multi virtual device settings. If empty (not set), it will create
     * single virtual device on each visible GPU, according to the settings
     * in "visible_device_list" above. Otherwise, the number of elements in the
     * list must be the same as the number of visible GPUs (after
     * "visible_device_list" filtering if it is set), and the string represented
     * device names (e.g. /device:GPU:<id>) will refer to the virtual
     * devices and have the <id> field assigned sequentially starting from 0,
     * according to the order they appear in this list and the "memory_limit"
     * list inside each element. For example,
     *   visible_device_list = "1,0"
     *   virtual_devices { memory_limit: 1GB memory_limit: 2GB }
     *   virtual_devices {}
     * will create three virtual devices as:
     *   /device:GPU:0 -> visible GPU 1 with 1GB memory
     *   /device:GPU:1 -> visible GPU 1 with 2GB memory
     *   /device:GPU:2 -> visible GPU 0 with all available memory
     * NOTE:
     * 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
     *    at the same time.
     * 2. Currently this setting is per-process, not per-session. Using
     *    different settings in different sessions within same process will
     *    result in undefined behavior.
     * </pre>
     *
     * <code>repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;</code>
     */
    public java.util.List<? extends org.tensorflow.framework.GPUOptions.Experimental.VirtualDevicesOrBuilder> 
        getVirtualDevicesOrBuilderList() {
      return virtualDevices_;
    }
    /**
     * <pre>
     * The multi virtual device settings. If empty (not set), it will create
     * single virtual device on each visible GPU, according to the settings
     * in "visible_device_list" above. Otherwise, the number of elements in the
     * list must be the same as the number of visible GPUs (after
     * "visible_device_list" filtering if it is set), and the string represented
     * device names (e.g. /device:GPU:<id>) will refer to the virtual
     * devices and have the <id> field assigned sequentially starting from 0,
     * according to the order they appear in this list and the "memory_limit"
     * list inside each element. For example,
     *   visible_device_list = "1,0"
     *   virtual_devices { memory_limit: 1GB memory_limit: 2GB }
     *   virtual_devices {}
     * will create three virtual devices as:
     *   /device:GPU:0 -> visible GPU 1 with 1GB memory
     *   /device:GPU:1 -> visible GPU 1 with 2GB memory
     *   /device:GPU:2 -> visible GPU 0 with all available memory
     * NOTE:
     * 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
     *    at the same time.
     * 2. Currently this setting is per-process, not per-session. Using
     *    different settings in different sessions within same process will
     *    result in undefined behavior.
     * </pre>
     *
     * <code>repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;</code>
     */
    public int getVirtualDevicesCount() {
      return virtualDevices_.size();
    }
    /**
     * <pre>
     * The multi virtual device settings. If empty (not set), it will create
     * single virtual device on each visible GPU, according to the settings
     * in "visible_device_list" above. Otherwise, the number of elements in the
     * list must be the same as the number of visible GPUs (after
     * "visible_device_list" filtering if it is set), and the string represented
     * device names (e.g. /device:GPU:<id>) will refer to the virtual
     * devices and have the <id> field assigned sequentially starting from 0,
     * according to the order they appear in this list and the "memory_limit"
     * list inside each element. For example,
     *   visible_device_list = "1,0"
     *   virtual_devices { memory_limit: 1GB memory_limit: 2GB }
     *   virtual_devices {}
     * will create three virtual devices as:
     *   /device:GPU:0 -> visible GPU 1 with 1GB memory
     *   /device:GPU:1 -> visible GPU 1 with 2GB memory
     *   /device:GPU:2 -> visible GPU 0 with all available memory
     * NOTE:
     * 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
     *    at the same time.
     * 2. Currently this setting is per-process, not per-session. Using
     *    different settings in different sessions within same process will
     *    result in undefined behavior.
     * </pre>
     *
     * <code>repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;</code>
     */
    public org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices getVirtualDevices(int index) {
      return virtualDevices_.get(index);
    }
    /**
     * <pre>
     * The multi virtual device settings. If empty (not set), it will create
     * single virtual device on each visible GPU, according to the settings
     * in "visible_device_list" above. Otherwise, the number of elements in the
     * list must be the same as the number of visible GPUs (after
     * "visible_device_list" filtering if it is set), and the string represented
     * device names (e.g. /device:GPU:<id>) will refer to the virtual
     * devices and have the <id> field assigned sequentially starting from 0,
     * according to the order they appear in this list and the "memory_limit"
     * list inside each element. For example,
     *   visible_device_list = "1,0"
     *   virtual_devices { memory_limit: 1GB memory_limit: 2GB }
     *   virtual_devices {}
     * will create three virtual devices as:
     *   /device:GPU:0 -> visible GPU 1 with 1GB memory
     *   /device:GPU:1 -> visible GPU 1 with 2GB memory
     *   /device:GPU:2 -> visible GPU 0 with all available memory
     * NOTE:
     * 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
     *    at the same time.
     * 2. Currently this setting is per-process, not per-session. Using
     *    different settings in different sessions within same process will
     *    result in undefined behavior.
     * </pre>
     *
     * <code>repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;</code>
     */
    public org.tensorflow.framework.GPUOptions.Experimental.VirtualDevicesOrBuilder getVirtualDevicesOrBuilder(
        int index) {
      return virtualDevices_.get(index);
    }

    public static final int USE_UNIFIED_MEMORY_FIELD_NUMBER = 2;
    private boolean useUnifiedMemory_;
    /**
     * <pre>
     * If true, uses CUDA unified memory for memory allocations. If
     * per_process_gpu_memory_fraction option is greater than 1.0, then unified
     * memory is used regardless of the value for this field. See comments for
     * per_process_gpu_memory_fraction field for more details and requirements
     * of the unified memory. This option is useful to oversubscribe memory if
     * multiple processes are sharing a single GPU while individually using less
     * than 1.0 per process memory fraction.
     * </pre>
     *
     * <code>bool use_unified_memory = 2;</code>
     */
    public boolean getUseUnifiedMemory() {
      return useUnifiedMemory_;
    }

    public static final int NUM_DEV_TO_DEV_COPY_STREAMS_FIELD_NUMBER = 3;
    private int numDevToDevCopyStreams_;
    /**
     * <pre>
     * If > 1, the number of device-to-device copy streams to create
     * for each GPUDevice.  Default value is 0, which is automatically
     * converted to 1.
     * </pre>
     *
     * <code>int32 num_dev_to_dev_copy_streams = 3;</code>
     */
    public int getNumDevToDevCopyStreams() {
      return numDevToDevCopyStreams_;
    }

    public static final int COLLECTIVE_RING_ORDER_FIELD_NUMBER = 4;
    private volatile java.lang.Object collectiveRingOrder_;
    /**
     * <pre>
     * If non-empty, defines a good GPU ring order on a single worker based on
     * device interconnect.  This assumes that all workers have the same GPU
     * topology.  Specify as a comma-separated string, e.g. "3,2,1,0,7,6,5,4".
     * This ring order is used by the RingReducer implementation of
     * CollectiveReduce, and serves as an override to automatic ring order
     * generation in OrderTaskDeviceMap() during CollectiveParam resolution.
     * </pre>
     *
     * <code>string collective_ring_order = 4;</code>
     */
    public java.lang.String getCollectiveRingOrder() {
      java.lang.Object ref = collectiveRingOrder_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        com.google.protobuf.ByteString bs = 
            (com.google.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        collectiveRingOrder_ = s;
        return s;
      }
    }
    /**
     * <pre>
     * If non-empty, defines a good GPU ring order on a single worker based on
     * device interconnect.  This assumes that all workers have the same GPU
     * topology.  Specify as a comma-separated string, e.g. "3,2,1,0,7,6,5,4".
     * This ring order is used by the RingReducer implementation of
     * CollectiveReduce, and serves as an override to automatic ring order
     * generation in OrderTaskDeviceMap() during CollectiveParam resolution.
     * </pre>
     *
     * <code>string collective_ring_order = 4;</code>
     */
    public com.google.protobuf.ByteString
        getCollectiveRingOrderBytes() {
      java.lang.Object ref = collectiveRingOrder_;
      if (ref instanceof java.lang.String) {
        com.google.protobuf.ByteString b = 
            com.google.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        collectiveRingOrder_ = b;
        return b;
      } else {
        return (com.google.protobuf.ByteString) ref;
      }
    }

    public static final int TIMESTAMPED_ALLOCATOR_FIELD_NUMBER = 5;
    private boolean timestampedAllocator_;
    /**
     * <pre>
     * If true then extra work is done by GPUDevice and GPUBFCAllocator to
     * keep track of when GPU memory is freed and when kernels actually
     * complete so that we can know when a nominally free memory chunk
     * is really not subject to pending use.
     * </pre>
     *
     * <code>bool timestamped_allocator = 5;</code>
     */
    public boolean getTimestampedAllocator() {
      return timestampedAllocator_;
    }

    public static final int KERNEL_TRACKER_MAX_INTERVAL_FIELD_NUMBER = 7;
    private int kernelTrackerMaxInterval_;
    /**
     * <pre>
     * Parameters for GPUKernelTracker.  By default no kernel tracking is done.
     * Note that timestamped_allocator is only effective if some tracking is
     * specified.
     * If kernel_tracker_max_interval = n > 0, then a tracking event
     * is inserted after every n kernels without an event.
     * </pre>
     *
     * <code>int32 kernel_tracker_max_interval = 7;</code>
     */
    public int getKernelTrackerMaxInterval() {
      return kernelTrackerMaxInterval_;
    }

    public static final int KERNEL_TRACKER_MAX_BYTES_FIELD_NUMBER = 8;
    private int kernelTrackerMaxBytes_;
    /**
     * <pre>
     * If kernel_tracker_max_bytes = n > 0, then a tracking event is
     * inserted after every series of kernels allocating a sum of
     * memory >= n.  If one kernel allocates b * n bytes, then one
     * event will be inserted after it, but it will count as b against
     * the pending limit.
     * </pre>
     *
     * <code>int32 kernel_tracker_max_bytes = 8;</code>
     */
    public int getKernelTrackerMaxBytes() {
      return kernelTrackerMaxBytes_;
    }

    public static final int KERNEL_TRACKER_MAX_PENDING_FIELD_NUMBER = 9;
    private int kernelTrackerMaxPending_;
    /**
     * <pre>
     * If kernel_tracker_max_pending > 0 then no more than this many
     * tracking events can be outstanding at a time.  An attempt to
     * launch an additional kernel will stall until an event
     * completes.
     * 
      *
      * int32 kernel_tracker_max_pending = 9;
     */
    public int getKernelTrackerMaxPending() {
      return kernelTrackerMaxPending_;
    }

    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;
      memoizedIsInitialized = 1;
      return true;
    }

    public void writeTo(com.google.protobuf.CodedOutputStream output)
        throws java.io.IOException {
      for (int i = 0; i < virtualDevices_.size(); i++) {
        output.writeMessage(1, virtualDevices_.get(i));
      }
      if (useUnifiedMemory_ != false) {
        output.writeBool(2, useUnifiedMemory_);
      }
      if (numDevToDevCopyStreams_ != 0) {
        output.writeInt32(3, numDevToDevCopyStreams_);
      }
      if (!getCollectiveRingOrderBytes().isEmpty()) {
        com.google.protobuf.GeneratedMessageV3.writeString(output, 4, collectiveRingOrder_);
      }
      if (timestampedAllocator_ != false) {
        output.writeBool(5, timestampedAllocator_);
      }
      if (kernelTrackerMaxInterval_ != 0) {
        output.writeInt32(7, kernelTrackerMaxInterval_);
      }
      if (kernelTrackerMaxBytes_ != 0) {
        output.writeInt32(8, kernelTrackerMaxBytes_);
      }
      if (kernelTrackerMaxPending_ != 0) {
        output.writeInt32(9, kernelTrackerMaxPending_);
      }
      unknownFields.writeTo(output);
    }

    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      for (int i = 0; i < virtualDevices_.size(); i++) {
        size += com.google.protobuf.CodedOutputStream
            .computeMessageSize(1, virtualDevices_.get(i));
      }
      if (useUnifiedMemory_ != false) {
        size += com.google.protobuf.CodedOutputStream
            .computeBoolSize(2, useUnifiedMemory_);
      }
      if (numDevToDevCopyStreams_ != 0) {
        size += com.google.protobuf.CodedOutputStream
            .computeInt32Size(3, numDevToDevCopyStreams_);
      }
      if (!getCollectiveRingOrderBytes().isEmpty()) {
        size += com.google.protobuf.GeneratedMessageV3.computeStringSize(4, collectiveRingOrder_);
      }
      if (timestampedAllocator_ != false) {
        size += com.google.protobuf.CodedOutputStream
            .computeBoolSize(5, timestampedAllocator_);
      }
      if (kernelTrackerMaxInterval_ != 0) {
        size += com.google.protobuf.CodedOutputStream
            .computeInt32Size(7, kernelTrackerMaxInterval_);
      }
      if (kernelTrackerMaxBytes_ != 0) {
        size += com.google.protobuf.CodedOutputStream
            .computeInt32Size(8, kernelTrackerMaxBytes_);
      }
      if (kernelTrackerMaxPending_ != 0) {
        size += com.google.protobuf.CodedOutputStream
            .computeInt32Size(9, kernelTrackerMaxPending_);
      }
      size += unknownFields.getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.tensorflow.framework.GPUOptions.Experimental)) {
        return super.equals(obj);
      }
      org.tensorflow.framework.GPUOptions.Experimental other =
          (org.tensorflow.framework.GPUOptions.Experimental) obj;

      boolean result = true;
      result = result && getVirtualDevicesList()
          .equals(other.getVirtualDevicesList());
      result = result && (getUseUnifiedMemory()
          == other.getUseUnifiedMemory());
      result = result && (getNumDevToDevCopyStreams()
          == other.getNumDevToDevCopyStreams());
      result = result && getCollectiveRingOrder()
          .equals(other.getCollectiveRingOrder());
      result = result && (getTimestampedAllocator()
          == other.getTimestampedAllocator());
      result = result && (getKernelTrackerMaxInterval()
          == other.getKernelTrackerMaxInterval());
      result = result && (getKernelTrackerMaxBytes()
          == other.getKernelTrackerMaxBytes());
      result = result && (getKernelTrackerMaxPending()
          == other.getKernelTrackerMaxPending());
      result = result && unknownFields.equals(other.unknownFields);
      return result;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (getVirtualDevicesCount() > 0) {
        hash = (37 * hash) + VIRTUAL_DEVICES_FIELD_NUMBER;
        hash = (53 * hash) + getVirtualDevicesList().hashCode();
      }
      hash = (37 * hash) + USE_UNIFIED_MEMORY_FIELD_NUMBER;
      hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(
          getUseUnifiedMemory());
      hash = (37 * hash) + NUM_DEV_TO_DEV_COPY_STREAMS_FIELD_NUMBER;
      hash = (53 * hash) + getNumDevToDevCopyStreams();
      hash = (37 * hash) + COLLECTIVE_RING_ORDER_FIELD_NUMBER;
      hash = (53 * hash) + getCollectiveRingOrder().hashCode();
      hash = (37 * hash) + TIMESTAMPED_ALLOCATOR_FIELD_NUMBER;
      hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(
          getTimestampedAllocator());
      hash = (37 * hash) + KERNEL_TRACKER_MAX_INTERVAL_FIELD_NUMBER;
      hash = (53 * hash) + getKernelTrackerMaxInterval();
      hash = (37 * hash) + KERNEL_TRACKER_MAX_BYTES_FIELD_NUMBER;
      hash = (53 * hash) + getKernelTrackerMaxBytes();
      hash = (37 * hash) + KERNEL_TRACKER_MAX_PENDING_FIELD_NUMBER;
      hash = (53 * hash) + getKernelTrackerMaxPending();
      hash = (29 * hash) + unknownFields.hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.tensorflow.framework.GPUOptions.Experimental parseFrom(
        java.nio.ByteBuffer data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.tensorflow.framework.GPUOptions.Experimental parseFrom(
        java.nio.ByteBuffer data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.tensorflow.framework.GPUOptions.Experimental parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.tensorflow.framework.GPUOptions.Experimental parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.tensorflow.framework.GPUOptions.Experimental parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.tensorflow.framework.GPUOptions.Experimental parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.tensorflow.framework.GPUOptions.Experimental parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return com.google.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.tensorflow.framework.GPUOptions.Experimental parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return com.google.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.tensorflow.framework.GPUOptions.Experimental parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return com.google.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }
    public static org.tensorflow.framework.GPUOptions.Experimental parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return com.google.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.tensorflow.framework.GPUOptions.Experimental parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return com.google.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.tensorflow.framework.GPUOptions.Experimental parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return com.google.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.tensorflow.framework.GPUOptions.Experimental prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code tensorflow.GPUOptions.Experimental}
     */
    public static final class Builder extends
        com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:tensorflow.GPUOptions.Experimental)
        org.tensorflow.framework.GPUOptions.ExperimentalOrBuilder {
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_GPUOptions_Experimental_descriptor;
      }

      protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_GPUOptions_Experimental_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.tensorflow.framework.GPUOptions.Experimental.class, org.tensorflow.framework.GPUOptions.Experimental.Builder.class);
      }

      // Construct using org.tensorflow.framework.GPUOptions.Experimental.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessageV3
            .alwaysUseFieldBuilders) {
          getVirtualDevicesFieldBuilder();
        }
      }
      public Builder clear() {
        super.clear();
        if (virtualDevicesBuilder_ == null) {
          virtualDevices_ = java.util.Collections.emptyList();
          bitField0_ = (bitField0_ & ~0x00000001);
        } else {
          virtualDevicesBuilder_.clear();
        }
        useUnifiedMemory_ = false;
        numDevToDevCopyStreams_ = 0;
        collectiveRingOrder_ = "";
        timestampedAllocator_ = false;
        kernelTrackerMaxInterval_ = 0;
        kernelTrackerMaxBytes_ = 0;
        kernelTrackerMaxPending_ = 0;
        return this;
      }

      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_GPUOptions_Experimental_descriptor;
      }

      public org.tensorflow.framework.GPUOptions.Experimental getDefaultInstanceForType() {
        return org.tensorflow.framework.GPUOptions.Experimental.getDefaultInstance();
      }

      public org.tensorflow.framework.GPUOptions.Experimental build() {
        org.tensorflow.framework.GPUOptions.Experimental result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      public org.tensorflow.framework.GPUOptions.Experimental buildPartial() {
        org.tensorflow.framework.GPUOptions.Experimental result = new org.tensorflow.framework.GPUOptions.Experimental(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (virtualDevicesBuilder_ == null) {
          if (((bitField0_ & 0x00000001) == 0x00000001)) {
            virtualDevices_ = java.util.Collections.unmodifiableList(virtualDevices_);
            bitField0_ = (bitField0_ & ~0x00000001);
          }
          result.virtualDevices_ = virtualDevices_;
        } else {
          result.virtualDevices_ = virtualDevicesBuilder_.build();
        }
        result.useUnifiedMemory_ = useUnifiedMemory_;
        result.numDevToDevCopyStreams_ = numDevToDevCopyStreams_;
        result.collectiveRingOrder_ = collectiveRingOrder_;
        result.timestampedAllocator_ = timestampedAllocator_;
        result.kernelTrackerMaxInterval_ = kernelTrackerMaxInterval_;
        result.kernelTrackerMaxBytes_ = kernelTrackerMaxBytes_;
        result.kernelTrackerMaxPending_ = kernelTrackerMaxPending_;
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }

      public Builder clone() {
        return (Builder) super.clone();
      }
      public Builder setField(
          com.google.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return (Builder) super.setField(field, value);
      }
      public Builder clearField(
          com.google.protobuf.Descriptors.FieldDescriptor field) {
        return (Builder) super.clearField(field);
      }
      public Builder clearOneof(
          com.google.protobuf.Descriptors.OneofDescriptor oneof) {
        return (Builder) super.clearOneof(oneof);
      }
      public Builder setRepeatedField(
          com.google.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return (Builder) super.setRepeatedField(field, index, value);
      }
      public Builder addRepeatedField(
          com.google.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return (Builder) super.addRepeatedField(field, value);
      }
      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.tensorflow.framework.GPUOptions.Experimental) {
          return mergeFrom((org.tensorflow.framework.GPUOptions.Experimental)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.tensorflow.framework.GPUOptions.Experimental other) {
        if (other == org.tensorflow.framework.GPUOptions.Experimental.getDefaultInstance()) return this;
        if (virtualDevicesBuilder_ == null) {
          if (!other.virtualDevices_.isEmpty()) {
            if (virtualDevices_.isEmpty()) {
              virtualDevices_ = other.virtualDevices_;
              bitField0_ = (bitField0_ & ~0x00000001);
            } else {
              ensureVirtualDevicesIsMutable();
              virtualDevices_.addAll(other.virtualDevices_);
            }
            onChanged();
          }
        } else {
          if (!other.virtualDevices_.isEmpty()) {
            if (virtualDevicesBuilder_.isEmpty()) {
              virtualDevicesBuilder_.dispose();
              virtualDevicesBuilder_ = null;
              virtualDevices_ = other.virtualDevices_;
              bitField0_ = (bitField0_ & ~0x00000001);
              virtualDevicesBuilder_ =
                  com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders
                      ? getVirtualDevicesFieldBuilder() : null;
            } else {
              virtualDevicesBuilder_.addAllMessages(other.virtualDevices_);
            }
          }
        }
        if (other.getUseUnifiedMemory() != false) {
          setUseUnifiedMemory(other.getUseUnifiedMemory());
        }
        if (other.getNumDevToDevCopyStreams() != 0) {
          setNumDevToDevCopyStreams(other.getNumDevToDevCopyStreams());
        }
        if (!other.getCollectiveRingOrder().isEmpty()) {
          collectiveRingOrder_ = other.collectiveRingOrder_;
          onChanged();
        }
        if (other.getTimestampedAllocator() != false) {
          setTimestampedAllocator(other.getTimestampedAllocator());
        }
        if (other.getKernelTrackerMaxInterval() != 0) {
          setKernelTrackerMaxInterval(other.getKernelTrackerMaxInterval());
        }
        if (other.getKernelTrackerMaxBytes() != 0) {
          setKernelTrackerMaxBytes(other.getKernelTrackerMaxBytes());
        }
        if (other.getKernelTrackerMaxPending() != 0) {
          setKernelTrackerMaxPending(other.getKernelTrackerMaxPending());
        }
        this.mergeUnknownFields(other.unknownFields);
        onChanged();
        return this;
      }

      public final boolean isInitialized() {
        return true;
      }

      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        org.tensorflow.framework.GPUOptions.Experimental parsedMessage = null;
        try {
          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
          parsedMessage = (org.tensorflow.framework.GPUOptions.Experimental) e.getUnfinishedMessage();
          throw e.unwrapIOException();
        } finally {
          if (parsedMessage != null) {
            mergeFrom(parsedMessage);
          }
        }
        return this;
      }
      private int bitField0_;

      private java.util.List<org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices> virtualDevices_ =
        java.util.Collections.emptyList();
      private void ensureVirtualDevicesIsMutable() {
        if (!((bitField0_ & 0x00000001) == 0x00000001)) {
          virtualDevices_ = new java.util.ArrayList<org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices>(virtualDevices_);
          bitField0_ |= 0x00000001;
        }
      }

      private com.google.protobuf.RepeatedFieldBuilderV3<
          org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices, org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices.Builder, org.tensorflow.framework.GPUOptions.Experimental.VirtualDevicesOrBuilder> virtualDevicesBuilder_;
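
      // Editor's note: a minimal usage sketch, not part of the generated file.
      // The parseFrom(...) overloads above pair with writeTo()/toByteArray()
      // (toByteArray() is inherited from com.google.protobuf.AbstractMessageLite)
      // for a simple serialize/parse round trip:
      //
      //   GPUOptions.Experimental exp = GPUOptions.Experimental.newBuilder()
      //       .setUseUnifiedMemory(true)
      //       .build();
      //   byte[] wire = exp.toByteArray();                 // serialize
      //   GPUOptions.Experimental parsed =
      //       GPUOptions.Experimental.parseFrom(wire);     // parse back
      //   assert parsed.equals(exp);                       // field-wise equality, see equals()

      /**
        *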
       * The multi virtual device settings. If empty (not set), it will create
       * single virtual device on each visible GPU, according to the settings
       * in "visible_device_list" above. Otherwise, the number of elements in the
       * list must be the same as the number of visible GPUs (after
       * "visible_device_list" filtering if it is set), and the string represented
       * device names (e.g. /device:GPU:<id>) will refer to the virtual
       * devices and have the <id> field assigned sequentially starting from 0,
       * according to the order they appear in this list and the "memory_limit"
       * list inside each element. For example,
       *   visible_device_list = "1,0"
       *   virtual_devices { memory_limit: 1GB memory_limit: 2GB }
       *   virtual_devices {}
       * will create three virtual devices as:
       *   /device:GPU:0 -> visible GPU 1 with 1GB memory
       *   /device:GPU:1 -> visible GPU 1 with 2GB memory
       *   /device:GPU:2 -> visible GPU 0 with all available memory
       * NOTE:
       * 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
       *    at the same time.
       * 2. Currently this setting is per-process, not per-session. Using
       *    different settings in different sessions within same process will
       *    result in undefined behavior.
       * 
        *
        * repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
       */
      public java.util.List<org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices> getVirtualDevicesList() {
        if (virtualDevicesBuilder_ == null) {
          return java.util.Collections.unmodifiableList(virtualDevices_);
        } else {
          return virtualDevicesBuilder_.getMessageList();
        }
      }
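
      // Editor's note: a hedged sketch of the javadoc example above, not part of
      // the generated file. setVisibleDeviceList/setExperimental on GPUOptions.Builder
      // and addMemoryLimitMb on the nested VirtualDevices builder are assumed from
      // tensorflow/core/protobuf/config.proto; they are not shown in this excerpt.
      //
      //   GPUOptions gpu = GPUOptions.newBuilder()
      //       .setVisibleDeviceList("1,0")
      //       .setExperimental(GPUOptions.Experimental.newBuilder()
      //           .addVirtualDevices(GPUOptions.Experimental.VirtualDevices.newBuilder()
      //               .addMemoryLimitMb(1024f)     // -> /device:GPU:0: visible GPU 1, 1GB
      //               .addMemoryLimitMb(2048f))    // -> /device:GPU:1: visible GPU 1, 2GB
      //           .addVirtualDevices(GPUOptions.Experimental.VirtualDevices.newBuilder()))
      //                                            // -> /device:GPU:2: visible GPU 0, all memory
      //       .build();

      /**
        *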
       * The multi virtual device settings. If empty (not set), it will create
       * single virtual device on each visible GPU, according to the settings
       * in "visible_device_list" above. Otherwise, the number of elements in the
       * list must be the same as the number of visible GPUs (after
       * "visible_device_list" filtering if it is set), and the string represented
       * device names (e.g. /device:GPU:<id>) will refer to the virtual
       * devices and have the <id> field assigned sequentially starting from 0,
       * according to the order they appear in this list and the "memory_limit"
       * list inside each element. For example,
       *   visible_device_list = "1,0"
       *   virtual_devices { memory_limit: 1GB memory_limit: 2GB }
       *   virtual_devices {}
       * will create three virtual devices as:
       *   /device:GPU:0 -> visible GPU 1 with 1GB memory
       *   /device:GPU:1 -> visible GPU 1 with 2GB memory
       *   /device:GPU:2 -> visible GPU 0 with all available memory
       * NOTE:
       * 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
       *    at the same time.
       * 2. Currently this setting is per-process, not per-session. Using
       *    different settings in different sessions within same process will
       *    result in undefined behavior.
       * 
        *
        * repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
       */
      public int getVirtualDevicesCount() {
        if (virtualDevicesBuilder_ == null) {
          return virtualDevices_.size();
        } else {
          return virtualDevicesBuilder_.getCount();
        }
      }
      /**
        *
       * The multi virtual device settings. If empty (not set), it will create
       * single virtual device on each visible GPU, according to the settings
       * in "visible_device_list" above. Otherwise, the number of elements in the
       * list must be the same as the number of visible GPUs (after
       * "visible_device_list" filtering if it is set), and the string represented
       * device names (e.g. /device:GPU:<id>) will refer to the virtual
       * devices and have the <id> field assigned sequentially starting from 0,
       * according to the order they appear in this list and the "memory_limit"
       * list inside each element. For example,
       *   visible_device_list = "1,0"
       *   virtual_devices { memory_limit: 1GB memory_limit: 2GB }
       *   virtual_devices {}
       * will create three virtual devices as:
       *   /device:GPU:0 -> visible GPU 1 with 1GB memory
       *   /device:GPU:1 -> visible GPU 1 with 2GB memory
       *   /device:GPU:2 -> visible GPU 0 with all available memory
       * NOTE:
       * 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
       *    at the same time.
       * 2. Currently this setting is per-process, not per-session. Using
       *    different settings in different sessions within same process will
       *    result in undefined behavior.
       * 
        *
        * repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
       */
      public org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices getVirtualDevices(int index) {
        if (virtualDevicesBuilder_ == null) {
          return virtualDevices_.get(index);
        } else {
          return virtualDevicesBuilder_.getMessage(index);
        }
      }
      /**
        *
       * The multi virtual device settings. If empty (not set), it will create
       * single virtual device on each visible GPU, according to the settings
       * in "visible_device_list" above. Otherwise, the number of elements in the
       * list must be the same as the number of visible GPUs (after
       * "visible_device_list" filtering if it is set), and the string represented
       * device names (e.g. /device:GPU:<id>) will refer to the virtual
       * devices and have the <id> field assigned sequentially starting from 0,
       * according to the order they appear in this list and the "memory_limit"
       * list inside each element. For example,
       *   visible_device_list = "1,0"
       *   virtual_devices { memory_limit: 1GB memory_limit: 2GB }
       *   virtual_devices {}
       * will create three virtual devices as:
       *   /device:GPU:0 -> visible GPU 1 with 1GB memory
       *   /device:GPU:1 -> visible GPU 1 with 2GB memory
       *   /device:GPU:2 -> visible GPU 0 with all available memory
       * NOTE:
       * 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
       *    at the same time.
       * 2. Currently this setting is per-process, not per-session. Using
       *    different settings in different sessions within same process will
       *    result in undefined behavior.
       * 
        *
        * repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
       */
      public Builder setVirtualDevices(
          int index, org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices value) {
        if (virtualDevicesBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureVirtualDevicesIsMutable();
          virtualDevices_.set(index, value);
          onChanged();
        } else {
          virtualDevicesBuilder_.setMessage(index, value);
        }
        return this;
      }
      /**
        *
       * The multi virtual device settings. If empty (not set), it will create
       * single virtual device on each visible GPU, according to the settings
       * in "visible_device_list" above. Otherwise, the number of elements in the
       * list must be the same as the number of visible GPUs (after
       * "visible_device_list" filtering if it is set), and the string represented
       * device names (e.g. /device:GPU:<id>) will refer to the virtual
       * devices and have the <id> field assigned sequentially starting from 0,
       * according to the order they appear in this list and the "memory_limit"
       * list inside each element. For example,
       *   visible_device_list = "1,0"
       *   virtual_devices { memory_limit: 1GB memory_limit: 2GB }
       *   virtual_devices {}
       * will create three virtual devices as:
       *   /device:GPU:0 -> visible GPU 1 with 1GB memory
       *   /device:GPU:1 -> visible GPU 1 with 2GB memory
       *   /device:GPU:2 -> visible GPU 0 with all available memory
       * NOTE:
       * 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
       *    at the same time.
       * 2. Currently this setting is per-process, not per-session. Using
       *    different settings in different sessions within same process will
       *    result in undefined behavior.
       * 
        *
        * repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
       */
      public Builder setVirtualDevices(
          int index, org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices.Builder builderForValue) {
        if (virtualDevicesBuilder_ == null) {
          ensureVirtualDevicesIsMutable();
          virtualDevices_.set(index, builderForValue.build());
          onChanged();
        } else {
          virtualDevicesBuilder_.setMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
        *
       * The multi virtual device settings. If empty (not set), it will create
       * single virtual device on each visible GPU, according to the settings
       * in "visible_device_list" above. Otherwise, the number of elements in the
       * list must be the same as the number of visible GPUs (after
       * "visible_device_list" filtering if it is set), and the string represented
       * device names (e.g. /device:GPU:<id>) will refer to the virtual
       * devices and have the <id> field assigned sequentially starting from 0,
       * according to the order they appear in this list and the "memory_limit"
       * list inside each element. For example,
       *   visible_device_list = "1,0"
       *   virtual_devices { memory_limit: 1GB memory_limit: 2GB }
       *   virtual_devices {}
       * will create three virtual devices as:
       *   /device:GPU:0 -> visible GPU 1 with 1GB memory
       *   /device:GPU:1 -> visible GPU 1 with 2GB memory
       *   /device:GPU:2 -> visible GPU 0 with all available memory
       * NOTE:
       * 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
       *    at the same time.
       * 2. Currently this setting is per-process, not per-session. Using
       *    different settings in different sessions within same process will
       *    result in undefined behavior.
       * 
        *
        * repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
       */
      public Builder addVirtualDevices(org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices value) {
        if (virtualDevicesBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureVirtualDevicesIsMutable();
          virtualDevices_.add(value);
          onChanged();
        } else {
          virtualDevicesBuilder_.addMessage(value);
        }
        return this;
      }
      /**
        *
       * The multi virtual device settings. If empty (not set), it will create
       * single virtual device on each visible GPU, according to the settings
       * in "visible_device_list" above. Otherwise, the number of elements in the
       * list must be the same as the number of visible GPUs (after
       * "visible_device_list" filtering if it is set), and the string represented
       * device names (e.g. /device:GPU:<id>) will refer to the virtual
       * devices and have the <id> field assigned sequentially starting from 0,
       * according to the order they appear in this list and the "memory_limit"
       * list inside each element. For example,
       *   visible_device_list = "1,0"
       *   virtual_devices { memory_limit: 1GB memory_limit: 2GB }
       *   virtual_devices {}
       * will create three virtual devices as:
       *   /device:GPU:0 -> visible GPU 1 with 1GB memory
       *   /device:GPU:1 -> visible GPU 1 with 2GB memory
       *   /device:GPU:2 -> visible GPU 0 with all available memory
       * NOTE:
       * 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
       *    at the same time.
       * 2. Currently this setting is per-process, not per-session. Using
       *    different settings in different sessions within same process will
       *    result in undefined behavior.
       * 
        *
        * repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
       */
      public Builder addVirtualDevices(
          int index, org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices value) {
        if (virtualDevicesBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureVirtualDevicesIsMutable();
          virtualDevices_.add(index, value);
          onChanged();
        } else {
          virtualDevicesBuilder_.addMessage(index, value);
        }
        return this;
      }
      /**
        *
       * The multi virtual device settings. If empty (not set), it will create
       * single virtual device on each visible GPU, according to the settings
       * in "visible_device_list" above. Otherwise, the number of elements in the
       * list must be the same as the number of visible GPUs (after
       * "visible_device_list" filtering if it is set), and the string represented
       * device names (e.g. /device:GPU:<id>) will refer to the virtual
       * devices and have the <id> field assigned sequentially starting from 0,
       * according to the order they appear in this list and the "memory_limit"
       * list inside each element. For example,
       *   visible_device_list = "1,0"
       *   virtual_devices { memory_limit: 1GB memory_limit: 2GB }
       *   virtual_devices {}
       * will create three virtual devices as:
       *   /device:GPU:0 -> visible GPU 1 with 1GB memory
       *   /device:GPU:1 -> visible GPU 1 with 2GB memory
       *   /device:GPU:2 -> visible GPU 0 with all available memory
       * NOTE:
       * 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
       *    at the same time.
       * 2. Currently this setting is per-process, not per-session. Using
       *    different settings in different sessions within same process will
       *    result in undefined behavior.
       * 
        *
        * repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
       */
      public Builder addVirtualDevices(
          org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices.Builder builderForValue) {
        if (virtualDevicesBuilder_ == null) {
          ensureVirtualDevicesIsMutable();
          virtualDevices_.add(builderForValue.build());
          onChanged();
        } else {
          virtualDevicesBuilder_.addMessage(builderForValue.build());
        }
        return this;
      }
      /**
        *
       * The multi virtual device settings. If empty (not set), it will create
       * single virtual device on each visible GPU, according to the settings
       * in "visible_device_list" above. Otherwise, the number of elements in the
       * list must be the same as the number of visible GPUs (after
       * "visible_device_list" filtering if it is set), and the string represented
       * device names (e.g. /device:GPU:<id>) will refer to the virtual
       * devices and have the <id> field assigned sequentially starting from 0,
       * according to the order they appear in this list and the "memory_limit"
       * list inside each element. For example,
       *   visible_device_list = "1,0"
       *   virtual_devices { memory_limit: 1GB memory_limit: 2GB }
       *   virtual_devices {}
       * will create three virtual devices as:
       *   /device:GPU:0 -> visible GPU 1 with 1GB memory
       *   /device:GPU:1 -> visible GPU 1 with 2GB memory
       *   /device:GPU:2 -> visible GPU 0 with all available memory
       * NOTE:
       * 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
       *    at the same time.
       * 2. Currently this setting is per-process, not per-session. Using
       *    different settings in different sessions within same process will
       *    result in undefined behavior.
       * 
        *
        * repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
       */
      public Builder addVirtualDevices(
          int index, org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices.Builder builderForValue) {
        if (virtualDevicesBuilder_ == null) {
          ensureVirtualDevicesIsMutable();
          virtualDevices_.add(index, builderForValue.build());
          onChanged();
        } else {
          virtualDevicesBuilder_.addMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
        *
       * The multi virtual device settings. If empty (not set), it will create
       * single virtual device on each visible GPU, according to the settings
       * in "visible_device_list" above. Otherwise, the number of elements in the
       * list must be the same as the number of visible GPUs (after
       * "visible_device_list" filtering if it is set), and the string represented
       * device names (e.g. /device:GPU:<id>) will refer to the virtual
       * devices and have the <id> field assigned sequentially starting from 0,
       * according to the order they appear in this list and the "memory_limit"
       * list inside each element. For example,
       *   visible_device_list = "1,0"
       *   virtual_devices { memory_limit: 1GB memory_limit: 2GB }
       *   virtual_devices {}
       * will create three virtual devices as:
       *   /device:GPU:0 -> visible GPU 1 with 1GB memory
       *   /device:GPU:1 -> visible GPU 1 with 2GB memory
       *   /device:GPU:2 -> visible GPU 0 with all available memory
       * NOTE:
       * 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
       *    at the same time.
       * 2. Currently this setting is per-process, not per-session. Using
       *    different settings in different sessions within same process will
       *    result in undefined behavior.
       * 
        *
        * repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
       */
      public Builder addAllVirtualDevices(
          java.lang.Iterable<? extends org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices> values) {
        if (virtualDevicesBuilder_ == null) {
          ensureVirtualDevicesIsMutable();
          com.google.protobuf.AbstractMessageLite.Builder.addAll(
              values, virtualDevices_);
          onChanged();
        } else {
          virtualDevicesBuilder_.addAllMessages(values);
        }
        return this;
      }
      /**
        *
       * The multi virtual device settings. If empty (not set), it will create
       * single virtual device on each visible GPU, according to the settings
       * in "visible_device_list" above. Otherwise, the number of elements in the
       * list must be the same as the number of visible GPUs (after
       * "visible_device_list" filtering if it is set), and the string represented
       * device names (e.g. /device:GPU:<id>) will refer to the virtual
       * devices and have the <id> field assigned sequentially starting from 0,
       * according to the order they appear in this list and the "memory_limit"
       * list inside each element. For example,
       *   visible_device_list = "1,0"
       *   virtual_devices { memory_limit: 1GB memory_limit: 2GB }
       *   virtual_devices {}
       * will create three virtual devices as:
       *   /device:GPU:0 -> visible GPU 1 with 1GB memory
       *   /device:GPU:1 -> visible GPU 1 with 2GB memory
       *   /device:GPU:2 -> visible GPU 0 with all available memory
       * NOTE:
       * 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
       *    at the same time.
       * 2. Currently this setting is per-process, not per-session. Using
       *    different settings in different sessions within same process will
       *    result in undefined behavior.
       * 
        *
        * repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
       */
      public Builder clearVirtualDevices() {
        if (virtualDevicesBuilder_ == null) {
          virtualDevices_ = java.util.Collections.emptyList();
          bitField0_ = (bitField0_ & ~0x00000001);
          onChanged();
        } else {
          virtualDevicesBuilder_.clear();
        }
        return this;
      }
      /**
        *
       * The multi virtual device settings. If empty (not set), it will create
       * single virtual device on each visible GPU, according to the settings
       * in "visible_device_list" above. Otherwise, the number of elements in the
       * list must be the same as the number of visible GPUs (after
       * "visible_device_list" filtering if it is set), and the string represented
       * device names (e.g. /device:GPU:<id>) will refer to the virtual
       * devices and have the <id> field assigned sequentially starting from 0,
       * according to the order they appear in this list and the "memory_limit"
       * list inside each element. For example,
       *   visible_device_list = "1,0"
       *   virtual_devices { memory_limit: 1GB memory_limit: 2GB }
       *   virtual_devices {}
       * will create three virtual devices as:
       *   /device:GPU:0 -> visible GPU 1 with 1GB memory
       *   /device:GPU:1 -> visible GPU 1 with 2GB memory
       *   /device:GPU:2 -> visible GPU 0 with all available memory
       * NOTE:
       * 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
       *    at the same time.
       * 2. Currently this setting is per-process, not per-session. Using
       *    different settings in different sessions within same process will
       *    result in undefined behavior.
       * 
        *
        * repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
       */
      public Builder removeVirtualDevices(int index) {
        if (virtualDevicesBuilder_ == null) {
          ensureVirtualDevicesIsMutable();
          virtualDevices_.remove(index);
          onChanged();
        } else {
          virtualDevicesBuilder_.remove(index);
        }
        return this;
      }
      /**
        *
       * The multi virtual device settings. If empty (not set), it will create
       * single virtual device on each visible GPU, according to the settings
       * in "visible_device_list" above. Otherwise, the number of elements in the
       * list must be the same as the number of visible GPUs (after
       * "visible_device_list" filtering if it is set), and the string represented
       * device names (e.g. /device:GPU:<id>) will refer to the virtual
       * devices and have the <id> field assigned sequentially starting from 0,
       * according to the order they appear in this list and the "memory_limit"
       * list inside each element. For example,
       *   visible_device_list = "1,0"
       *   virtual_devices { memory_limit: 1GB memory_limit: 2GB }
       *   virtual_devices {}
       * will create three virtual devices as:
       *   /device:GPU:0 -> visible GPU 1 with 1GB memory
       *   /device:GPU:1 -> visible GPU 1 with 2GB memory
       *   /device:GPU:2 -> visible GPU 0 with all available memory
       * NOTE:
       * 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
       *    at the same time.
       * 2. Currently this setting is per-process, not per-session. Using
       *    different settings in different sessions within same process will
       *    result in undefined behavior.
       * 
        *
        * repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
       */
      public org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices.Builder getVirtualDevicesBuilder(
          int index) {
        return getVirtualDevicesFieldBuilder().getBuilder(index);
      }
      /**
        *
       * The multi virtual device settings. If empty (not set), it will create
       * single virtual device on each visible GPU, according to the settings
       * in "visible_device_list" above. Otherwise, the number of elements in the
       * list must be the same as the number of visible GPUs (after
       * "visible_device_list" filtering if it is set), and the string represented
       * device names (e.g. /device:GPU:<id>) will refer to the virtual
       * devices and have the <id> field assigned sequentially starting from 0,
       * according to the order they appear in this list and the "memory_limit"
       * list inside each element. For example,
       *   visible_device_list = "1,0"
       *   virtual_devices { memory_limit: 1GB memory_limit: 2GB }
       *   virtual_devices {}
       * will create three virtual devices as:
       *   /device:GPU:0 -> visible GPU 1 with 1GB memory
       *   /device:GPU:1 -> visible GPU 1 with 2GB memory
       *   /device:GPU:2 -> visible GPU 0 with all available memory
       * NOTE:
       * 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
       *    at the same time.
       * 2. Currently this setting is per-process, not per-session. Using
       *    different settings in different sessions within same process will
       *    result in undefined behavior.
       * 
        *
        * repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
       */
      public org.tensorflow.framework.GPUOptions.Experimental.VirtualDevicesOrBuilder getVirtualDevicesOrBuilder(
          int index) {
        if (virtualDevicesBuilder_ == null) {
          return virtualDevices_.get(index);
        } else {
          return virtualDevicesBuilder_.getMessageOrBuilder(index);
        }
      }
      /**
        *
       * The multi virtual device settings. If empty (not set), it will create
       * single virtual device on each visible GPU, according to the settings
       * in "visible_device_list" above. Otherwise, the number of elements in the
       * list must be the same as the number of visible GPUs (after
       * "visible_device_list" filtering if it is set), and the string represented
       * device names (e.g. /device:GPU:<id>) will refer to the virtual
       * devices and have the <id> field assigned sequentially starting from 0,
       * according to the order they appear in this list and the "memory_limit"
       * list inside each element. For example,
       *   visible_device_list = "1,0"
       *   virtual_devices { memory_limit: 1GB memory_limit: 2GB }
       *   virtual_devices {}
       * will create three virtual devices as:
       *   /device:GPU:0 -> visible GPU 1 with 1GB memory
       *   /device:GPU:1 -> visible GPU 1 with 2GB memory
       *   /device:GPU:2 -> visible GPU 0 with all available memory
       * NOTE:
       * 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
       *    at the same time.
       * 2. Currently this setting is per-process, not per-session. Using
       *    different settings in different sessions within same process will
       *    result in undefined behavior.
       * 
        *
        * repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
       */
      public java.util.List<? extends org.tensorflow.framework.GPUOptions.Experimental.VirtualDevicesOrBuilder>
           getVirtualDevicesOrBuilderList() {
        if (virtualDevicesBuilder_ != null) {
          return virtualDevicesBuilder_.getMessageOrBuilderList();
        } else {
          return java.util.Collections.unmodifiableList(virtualDevices_);
        }
      }
      /**
        *
       * The multi virtual device settings. If empty (not set), it will create
       * single virtual device on each visible GPU, according to the settings
       * in "visible_device_list" above. Otherwise, the number of elements in the
       * list must be the same as the number of visible GPUs (after
       * "visible_device_list" filtering if it is set), and the string represented
       * device names (e.g. /device:GPU:<id>) will refer to the virtual
       * devices and have the <id> field assigned sequentially starting from 0,
       * according to the order they appear in this list and the "memory_limit"
       * list inside each element. For example,
       *   visible_device_list = "1,0"
       *   virtual_devices { memory_limit: 1GB memory_limit: 2GB }
       *   virtual_devices {}
       * will create three virtual devices as:
       *   /device:GPU:0 -> visible GPU 1 with 1GB memory
       *   /device:GPU:1 -> visible GPU 1 with 2GB memory
       *   /device:GPU:2 -> visible GPU 0 with all available memory
       * NOTE:
       * 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
       *    at the same time.
       * 2. Currently this setting is per-process, not per-session. Using
       *    different settings in different sessions within same process will
       *    result in undefined behavior.
       * 
        *
        * repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
       */
      public org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices.Builder addVirtualDevicesBuilder() {
        return getVirtualDevicesFieldBuilder().addBuilder(
            org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices.getDefaultInstance());
      }
      /**
        *
       * The multi virtual device settings. If empty (not set), it will create
       * single virtual device on each visible GPU, according to the settings
       * in "visible_device_list" above. Otherwise, the number of elements in the
       * list must be the same as the number of visible GPUs (after
       * "visible_device_list" filtering if it is set), and the string represented
       * device names (e.g. /device:GPU:<id>) will refer to the virtual
       * devices and have the <id> field assigned sequentially starting from 0,
       * according to the order they appear in this list and the "memory_limit"
       * list inside each element. For example,
       *   visible_device_list = "1,0"
       *   virtual_devices { memory_limit: 1GB memory_limit: 2GB }
       *   virtual_devices {}
       * will create three virtual devices as:
       *   /device:GPU:0 -> visible GPU 1 with 1GB memory
       *   /device:GPU:1 -> visible GPU 1 with 2GB memory
       *   /device:GPU:2 -> visible GPU 0 with all available memory
       * NOTE:
       * 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
       *    at the same time.
       * 2. Currently this setting is per-process, not per-session. Using
       *    different settings in different sessions within same process will
       *    result in undefined behavior.
       * 
        *
        * repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
       */
      public org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices.Builder addVirtualDevicesBuilder(
          int index) {
        return getVirtualDevicesFieldBuilder().addBuilder(
            index, org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices.getDefaultInstance());
      }
      /**
        *
       * The multi virtual device settings. If empty (not set), it will create
       * single virtual device on each visible GPU, according to the settings
       * in "visible_device_list" above. Otherwise, the number of elements in the
       * list must be the same as the number of visible GPUs (after
       * "visible_device_list" filtering if it is set), and the string represented
       * device names (e.g. /device:GPU:<id>) will refer to the virtual
       * devices and have the <id> field assigned sequentially starting from 0,
       * according to the order they appear in this list and the "memory_limit"
       * list inside each element. For example,
       *   visible_device_list = "1,0"
       *   virtual_devices { memory_limit: 1GB memory_limit: 2GB }
       *   virtual_devices {}
       * will create three virtual devices as:
       *   /device:GPU:0 -> visible GPU 1 with 1GB memory
       *   /device:GPU:1 -> visible GPU 1 with 2GB memory
       *   /device:GPU:2 -> visible GPU 0 with all available memory
       * NOTE:
       * 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
       *    at the same time.
       * 2. Currently this setting is per-process, not per-session. Using
       *    different settings in different sessions within same process will
       *    result in undefined behavior.
       * 
        *
        * repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;
       */
      public java.util.List<org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices.Builder>
           getVirtualDevicesBuilderList() {
        return getVirtualDevicesFieldBuilder().getBuilderList();
      }
      private com.google.protobuf.RepeatedFieldBuilderV3<
          org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices, org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices.Builder, org.tensorflow.framework.GPUOptions.Experimental.VirtualDevicesOrBuilder>
          getVirtualDevicesFieldBuilder() {
        if (virtualDevicesBuilder_ == null) {
          virtualDevicesBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3<
              org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices, org.tensorflow.framework.GPUOptions.Experimental.VirtualDevices.Builder, org.tensorflow.framework.GPUOptions.Experimental.VirtualDevicesOrBuilder>(
                  virtualDevices_,
                  ((bitField0_ & 0x00000001) == 0x00000001),
                  getParentForChildren(),
                  isClean());
          virtualDevices_ = null;
        }
        return virtualDevicesBuilder_;
      }

      private boolean useUnifiedMemory_ ;
      /**
        *
       * If true, uses CUDA unified memory for memory allocations. If
       * per_process_gpu_memory_fraction option is greater than 1.0, then unified
       * memory is used regardless of the value for this field. See comments for
       * per_process_gpu_memory_fraction field for more details and requirements
       * of the unified memory. This option is useful to oversubscribe memory if
       * multiple processes are sharing a single GPU while individually using less
       * than 1.0 per process memory fraction.
       * 
        *
        * bool use_unified_memory = 2;
       */
      public boolean getUseUnifiedMemory() {
        return useUnifiedMemory_;
      }
      /**
        *
       * If true, uses CUDA unified memory for memory allocations. If
       * per_process_gpu_memory_fraction option is greater than 1.0, then unified
       * memory is used regardless of the value for this field. See comments for
       * per_process_gpu_memory_fraction field for more details and requirements
       * of the unified memory. This option is useful to oversubscribe memory if
       * multiple processes are sharing a single GPU while individually using less
       * than 1.0 per process memory fraction.
       * 
        *
        * bool use_unified_memory = 2;
       */
      public Builder setUseUnifiedMemory(boolean value) {
        useUnifiedMemory_ = value;
        onChanged();
        return this;
      }
      /**
        *
       * If true, uses CUDA unified memory for memory allocations. If
       * per_process_gpu_memory_fraction option is greater than 1.0, then unified
       * memory is used regardless of the value for this field. See comments for
       * per_process_gpu_memory_fraction field for more details and requirements
       * of the unified memory. This option is useful to oversubscribe memory if
       * multiple processes are sharing a single GPU while individually using less
       * than 1.0 per process memory fraction.
       * 
        *
        * bool use_unified_memory = 2;
       */
      public Builder clearUseUnifiedMemory() {
        useUnifiedMemory_ = false;
        onChanged();
        return this;
      }

      private int numDevToDevCopyStreams_ ;
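
      // Editor's note: a hedged sketch, not part of the generated file. Oversubscribing
      // a GPU shared by several processes, per the use_unified_memory comment above;
      // setPerProcessGpuMemoryFraction is the assumed generated setter for field 1.
      //
      //   GPUOptions shared = GPUOptions.newBuilder()
      //       .setPerProcessGpuMemoryFraction(0.4)            // each process stays below 1.0
      //       .setExperimental(GPUOptions.Experimental.newBuilder()
      //           .setUseUnifiedMemory(true))                 // allocations may spill to host memory
      //       .build();

      /**
        *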
       * If > 1, the number of device-to-device copy streams to create
       * for each GPUDevice.  Default value is 0, which is automatically
       * converted to 1.
       * 
        *
        * int32 num_dev_to_dev_copy_streams = 3;
       */
      public int getNumDevToDevCopyStreams() {
        return numDevToDevCopyStreams_;
      }
      /**
        *
       * If > 1, the number of device-to-device copy streams to create
       * for each GPUDevice.  Default value is 0, which is automatically
       * converted to 1.
       * 
        *
        * int32 num_dev_to_dev_copy_streams = 3;
       */
      public Builder setNumDevToDevCopyStreams(int value) {
        numDevToDevCopyStreams_ = value;
        onChanged();
        return this;
      }
      /**
        *
       * If > 1, the number of device-to-device copy streams to create
       * for each GPUDevice.  Default value is 0, which is automatically
       * converted to 1.
       * 
        *
        * int32 num_dev_to_dev_copy_streams = 3;
       */
      public Builder clearNumDevToDevCopyStreams() {
        numDevToDevCopyStreams_ = 0;
        onChanged();
        return this;
      }

      private java.lang.Object collectiveRingOrder_ = "";
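
      // Editor's note: a minimal sketch, not part of the generated file, using the
      // setter defined just above. Per the field comment, 0 is converted to 1.
      //
      //   GPUOptions.Experimental copies = GPUOptions.Experimental.newBuilder()
      //       .setNumDevToDevCopyStreams(2)   // two device-to-device copy streams per GPUDevice
      //       .build();

      /**
        *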
       * If non-empty, defines a good GPU ring order on a single worker based on
       * device interconnect.  This assumes that all workers have the same GPU
       * topology.  Specify as a comma-separated string, e.g. "3,2,1,0,7,6,5,4".
       * This ring order is used by the RingReducer implementation of
       * CollectiveReduce, and serves as an override to automatic ring order
       * generation in OrderTaskDeviceMap() during CollectiveParam resolution.
       * 
        *
        * string collective_ring_order = 4;
       */
      public java.lang.String getCollectiveRingOrder() {
        java.lang.Object ref = collectiveRingOrder_;
        if (!(ref instanceof java.lang.String)) {
          com.google.protobuf.ByteString bs =
              (com.google.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          collectiveRingOrder_ = s;
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
        *
       * If non-empty, defines a good GPU ring order on a single worker based on
       * device interconnect.  This assumes that all workers have the same GPU
       * topology.  Specify as a comma-separated string, e.g. "3,2,1,0,7,6,5,4".
       * This ring order is used by the RingReducer implementation of
       * CollectiveReduce, and serves as an override to automatic ring order
       * generation in OrderTaskDeviceMap() during CollectiveParam resolution.
       * 
        *
        * string collective_ring_order = 4;
       */
      public com.google.protobuf.ByteString getCollectiveRingOrderBytes() {
        java.lang.Object ref = collectiveRingOrder_;
        if (ref instanceof String) {
          com.google.protobuf.ByteString b =
              com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
          collectiveRingOrder_ = b;
          return b;
        } else {
          return (com.google.protobuf.ByteString) ref;
        }
      }
      /**
        *
       * If non-empty, defines a good GPU ring order on a single worker based on
       * device interconnect.  This assumes that all workers have the same GPU
       * topology.  Specify as a comma-separated string, e.g. "3,2,1,0,7,6,5,4".
       * This ring order is used by the RingReducer implementation of
       * CollectiveReduce, and serves as an override to automatic ring order
       * generation in OrderTaskDeviceMap() during CollectiveParam resolution.
       * 
        *
        * string collective_ring_order = 4;
       */
      public Builder setCollectiveRingOrder(
          java.lang.String value) {
        if (value == null) {
          throw new NullPointerException();
        }
        collectiveRingOrder_ = value;
        onChanged();
        return this;
      }
      /**
        *
       * If non-empty, defines a good GPU ring order on a single worker based on
       * device interconnect.  This assumes that all workers have the same GPU
       * topology.  Specify as a comma-separated string, e.g. "3,2,1,0,7,6,5,4".
       * This ring order is used by the RingReducer implementation of
       * CollectiveReduce, and serves as an override to automatic ring order
       * generation in OrderTaskDeviceMap() during CollectiveParam resolution.
       * 
        *
        * string collective_ring_order = 4;
       */
      public Builder clearCollectiveRingOrder() {
        collectiveRingOrder_ = getDefaultInstance().getCollectiveRingOrder();
        onChanged();
        return this;
      }
      /**
        *
       * If non-empty, defines a good GPU ring order on a single worker based on
       * device interconnect.  This assumes that all workers have the same GPU
       * topology.  Specify as a comma-separated string, e.g. "3,2,1,0,7,6,5,4".
       * This ring order is used by the RingReducer implementation of
       * CollectiveReduce, and serves as an override to automatic ring order
       * generation in OrderTaskDeviceMap() during CollectiveParam resolution.
       * 
        *
        * string collective_ring_order = 4;
       */
      public Builder setCollectiveRingOrderBytes(
          com.google.protobuf.ByteString value) {
        if (value == null) {
          throw new NullPointerException();
        }
        checkByteStringIsUtf8(value);
        collectiveRingOrder_ = value;
        onChanged();
        return this;
      }

      private boolean timestampedAllocator_ ;
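
      // Editor's note: a minimal sketch, not part of the generated file. The ring
      // order follows the comma-separated format described in the field comment above.
      //
      //   GPUOptions.Experimental ring = GPUOptions.Experimental.newBuilder()
      //       .setCollectiveRingOrder("3,2,1,0,7,6,5,4")
      //       .build();

      /**
        *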
       * If true then extra work is done by GPUDevice and GPUBFCAllocator to
       * keep track of when GPU memory is freed and when kernels actually
       * complete so that we can know when a nominally free memory chunk
       * is really not subject to pending use.
       * 
        *
        * bool timestamped_allocator = 5;
       */
      public boolean getTimestampedAllocator() {
        return timestampedAllocator_;
      }
      /**
        *
       * If true then extra work is done by GPUDevice and GPUBFCAllocator to
       * keep track of when GPU memory is freed and when kernels actually
       * complete so that we can know when a nominally free memory chunk
       * is really not subject to pending use.
       * 
        *
        * bool timestamped_allocator = 5;
       */
      public Builder setTimestampedAllocator(boolean value) {
        timestampedAllocator_ = value;
        onChanged();
        return this;
      }
      /**
        *
       * If true then extra work is done by GPUDevice and GPUBFCAllocator to
       * keep track of when GPU memory is freed and when kernels actually
       * complete so that we can know when a nominally free memory chunk
       * is really not subject to pending use.
       * 
        *
        * bool timestamped_allocator = 5;
       */
      public Builder clearTimestampedAllocator() {
        timestampedAllocator_ = false;
        onChanged();
        return this;
      }

      private int kernelTrackerMaxInterval_ ;
      /**
        *
       * Parameters for GPUKernelTracker.  By default no kernel tracking is done.
       * Note that timestamped_allocator is only effective if some tracking is
       * specified.
       * If kernel_tracker_max_interval = n > 0, then a tracking event
       * is inserted after every n kernels without an event.
       * 
        *
        * int32 kernel_tracker_max_interval = 7;
       */
      public int getKernelTrackerMaxInterval() {
        return kernelTrackerMaxInterval_;
      }
      /**
        *
       * Parameters for GPUKernelTracker.  By default no kernel tracking is done.
       * Note that timestamped_allocator is only effective if some tracking is
       * specified.
       * If kernel_tracker_max_interval = n > 0, then a tracking event
       * is inserted after every n kernels without an event.
       * 
        *
        * int32 kernel_tracker_max_interval = 7;
       */
      public Builder setKernelTrackerMaxInterval(int value) {
        kernelTrackerMaxInterval_ = value;
        onChanged();
        return this;
      }
      /**
        *
       * Parameters for GPUKernelTracker.  By default no kernel tracking is done.
       * Note that timestamped_allocator is only effective if some tracking is
       * specified.
       * If kernel_tracker_max_interval = n > 0, then a tracking event
       * is inserted after every n kernels without an event.
       * 
        *
        * int32 kernel_tracker_max_interval = 7;
       */
      public Builder clearKernelTrackerMaxInterval() {
        kernelTrackerMaxInterval_ = 0;
        onChanged();
        return this;
      }

      private int kernelTrackerMaxBytes_ ;
      /**
        *
       * If kernel_tracker_max_bytes = n > 0, then a tracking event is
       * inserted after every series of kernels allocating a sum of
       * memory >= n.  If one kernel allocates b * n bytes, then one
       * event will be inserted after it, but it will count as b against
       * the pending limit.
       * 
        *
        * int32 kernel_tracker_max_bytes = 8;
       */
      public int getKernelTrackerMaxBytes() {
        return kernelTrackerMaxBytes_;
      }
      /**
        *
       * If kernel_tracker_max_bytes = n > 0, then a tracking event is
       * inserted after every series of kernels allocating a sum of
       * memory >= n.  If one kernel allocates b * n bytes, then one
       * event will be inserted after it, but it will count as b against
       * the pending limit.
       * 
        *
        * int32 kernel_tracker_max_bytes = 8;
       */
      public Builder setKernelTrackerMaxBytes(int value) {
        kernelTrackerMaxBytes_ = value;
        onChanged();
        return this;
      }
      /**
        *
       * If kernel_tracker_max_bytes = n > 0, then a tracking event is
       * inserted after every series of kernels allocating a sum of
       * memory >= n.  If one kernel allocates b * n bytes, then one
       * event will be inserted after it, but it will count as b against
       * the pending limit.
       * 
        *
        * int32 kernel_tracker_max_bytes = 8;
       */
      public Builder clearKernelTrackerMaxBytes() {
        kernelTrackerMaxBytes_ = 0;
        onChanged();
        return this;
      }

      private int kernelTrackerMaxPending_ ;
      /**
        *
       * If kernel_tracker_max_pending > 0 then no more than this many
       * tracking events can be outstanding at a time.  An attempt to
       * launch an additional kernel will stall until an event
       * completes.
       * 
        *
        * int32 kernel_tracker_max_pending = 9;
       */
      public int getKernelTrackerMaxPending() {
        return kernelTrackerMaxPending_;
      }
      /**
        *
       * If kernel_tracker_max_pending > 0 then no more than this many
       * tracking events can be outstanding at a time.  An attempt to
       * launch an additional kernel will stall until an event
       * completes.
       * 
        *
        * int32 kernel_tracker_max_pending = 9;
       */
      public Builder setKernelTrackerMaxPending(int value) {
        kernelTrackerMaxPending_ = value;
        onChanged();
        return this;
      }
      /**
        *
       * If kernel_tracker_max_pending > 0 then no more than this many
       * tracking events can be outstanding at a time.  An attempt to
       * launch an additional kernel will stall until an event
       * completes.
       * 
        *
        * int32 kernel_tracker_max_pending = 9;
       */
      public Builder clearKernelTrackerMaxPending() {
        kernelTrackerMaxPending_ = 0;
        onChanged();
        return this;
      }
      public final Builder setUnknownFields(
          final com.google.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFieldsProto3(unknownFields);
      }

      public final Builder mergeUnknownFields(
          final com.google.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:tensorflow.GPUOptions.Experimental)
    }

    // @@protoc_insertion_point(class_scope:tensorflow.GPUOptions.Experimental)
    private static final org.tensorflow.framework.GPUOptions.Experimental DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.tensorflow.framework.GPUOptions.Experimental();
    }

    public static org.tensorflow.framework.GPUOptions.Experimental getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    private static final com.google.protobuf.Parser<Experimental>
        PARSER = new com.google.protobuf.AbstractParser<Experimental>() {
      public Experimental parsePartialFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return new Experimental(input, extensionRegistry);
      }
    };

    public static com.google.protobuf.Parser<Experimental> parser() {
      return PARSER;
    }

    @java.lang.Override
    public com.google.protobuf.Parser<Experimental> getParserForType() {
      return PARSER;
    }

    public org.tensorflow.framework.GPUOptions.Experimental getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }

  public static final int PER_PROCESS_GPU_MEMORY_FRACTION_FIELD_NUMBER = 1;
  private double perProcessGpuMemoryFraction_;
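
  // Editor's note: a hedged sketch, not part of the generated file. Per the
  // Experimental field comments above, timestamped_allocator only takes effect
  // when some kernel tracking is enabled; the thresholds below are illustrative.
  //
  //   GPUOptions.Experimental tracked = GPUOptions.Experimental.newBuilder()
  //       .setTimestampedAllocator(true)
  //       .setKernelTrackerMaxInterval(8)       // event after at most 8 untracked kernels
  //       .setKernelTrackerMaxBytes(1 << 20)    // ...or after ~1MiB of kernel allocations
  //       .setKernelTrackerMaxPending(4)        // stall launches past 4 outstanding events
  //       .build();

  /**
    *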
   * Fraction of the available GPU memory to allocate for each process.
   * 1 means to allocate all of the GPU memory, 0.5 means the process
   * allocates up to ~50% of the available GPU memory.
   * GPU memory is pre-allocated unless the allow_growth option is enabled.
   * If greater than 1.0, uses CUDA unified memory to potentially oversubscribe
   * the amount of memory available on the GPU device by using host memory as a
   * swap space. Accessing memory not available on the device will be
   * significantly slower as that would require memory transfer between the host
   * and the device. Options to reduce the memory requirement should be
   * considered before enabling this option as this may come with a negative
   * performance impact. Oversubscription using the unified memory requires
   * Pascal class or newer GPUs and it is currently only supported on the Linux
   * operating system. See
   * https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#um-requirements
   * for the detailed requirements.
   * 
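    * Illustrative usage (editor's sketch, not part of the generated docs):
    * capping this process at roughly 40% of GPU memory; the 0.4 fraction
    * is an arbitrary example value.
    *
    *   GPUOptions opts = GPUOptions.newBuilder()
    *       .setPerProcessGpuMemoryFraction(0.4)
    *       .build();
    *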
    *
    * double per_process_gpu_memory_fraction = 1;
    */
  public double getPerProcessGpuMemoryFraction() {
    return perProcessGpuMemoryFraction_;
  }

  public static final int ALLOW_GROWTH_FIELD_NUMBER = 4;
  private boolean allowGrowth_;
  /**
    *
   * If true, the allocator does not pre-allocate the entire specified
   * GPU memory region, instead starting small and growing as needed.
   * 
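    * Illustrative usage (editor's sketch): opting into on-demand growth so
    * the allocator starts small instead of grabbing the whole region.
    *
    *   GPUOptions opts = GPUOptions.newBuilder()
    *       .setAllowGrowth(true)
    *       .build();
    *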
    *
    * bool allow_growth = 4;
    */
  public boolean getAllowGrowth() {
    return allowGrowth_;
  }

  public static final int ALLOCATOR_TYPE_FIELD_NUMBER = 2;
  private volatile java.lang.Object allocatorType_;
  /**
    *
   * The type of GPU allocation strategy to use.
   * Allowed values:
   * "": The empty string (default) uses a system-chosen default
   *     which may change over time.
   * "BFC": A "Best-fit with coalescing" algorithm, simplified from a
   *        version of dlmalloc.
   * 
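    * Illustrative usage (editor's sketch): requesting the BFC allocator
    * explicitly; the empty string would leave the choice to the system.
    *
    *   GPUOptions opts = GPUOptions.newBuilder()
    *       .setAllocatorType("BFC")
    *       .build();
    *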
    *
    * string allocator_type = 2;
    */
  public java.lang.String getAllocatorType() {
    java.lang.Object ref = allocatorType_;
    if (ref instanceof java.lang.String) {
      return (java.lang.String) ref;
    } else {
      com.google.protobuf.ByteString bs =
          (com.google.protobuf.ByteString) ref;
      java.lang.String s = bs.toStringUtf8();
      allocatorType_ = s;
      return s;
    }
  }
  /**
    *
   * The type of GPU allocation strategy to use.
   * Allowed values:
   * "": The empty string (default) uses a system-chosen default
   *     which may change over time.
   * "BFC": A "Best-fit with coalescing" algorithm, simplified from a
   *        version of dlmalloc.
   * 
    *
    * string allocator_type = 2;
    */
  public com.google.protobuf.ByteString getAllocatorTypeBytes() {
    java.lang.Object ref = allocatorType_;
    if (ref instanceof java.lang.String) {
      com.google.protobuf.ByteString b =
          com.google.protobuf.ByteString.copyFromUtf8(
              (java.lang.String) ref);
      allocatorType_ = b;
      return b;
    } else {
      return (com.google.protobuf.ByteString) ref;
    }
  }

  public static final int DEFERRED_DELETION_BYTES_FIELD_NUMBER = 3;
  private long deferredDeletionBytes_;
  /**
    *
   * Delay deletion of up to this many bytes to reduce the number of
   * interactions with gpu driver code.  If 0, the system chooses
   * a reasonable default (several MBs).
   * 
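    * Illustrative usage (editor's sketch): batching deallocations in ~1 MB
    * chunks; 1 << 20 is an arbitrary example, and 0 keeps the system default.
    *
    *   GPUOptions opts = GPUOptions.newBuilder()
    *       .setDeferredDeletionBytes(1 << 20)
    *       .build();
    *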
    *
    * int64 deferred_deletion_bytes = 3;
    */
  public long getDeferredDeletionBytes() {
    return deferredDeletionBytes_;
  }

  public static final int VISIBLE_DEVICE_LIST_FIELD_NUMBER = 5;
  private volatile java.lang.Object visibleDeviceList_;
  /**
    *
   * A comma-separated list of GPU ids that determines the 'visible'
   * to 'virtual' mapping of GPU devices.  For example, if TensorFlow
   * can see 8 GPU devices in the process, and one wanted to map
   * visible GPU devices 5 and 3 as "/device:GPU:0", and "/device:GPU:1",
   * then one would specify this field as "5,3".  This field is similar in
   * spirit to the CUDA_VISIBLE_DEVICES environment variable, except
   * it applies to the visible GPU devices in the process.
   * NOTE:
   * 1. The GPU driver provides the process with the visible GPUs
   *    in an order which is not guaranteed to have any correlation to
   *    the *physical* GPU id in the machine.  This field is used for
   *    remapping "visible" to "virtual", which means this operates only
   *    after the process starts.  Users are required to use vendor
   *    specific mechanisms (e.g., CUDA_VISIBLE_DEVICES) to control the
   *    physical to visible device mapping prior to invoking TensorFlow.
   * 2. In the code, the ids in this list are also called "platform GPU id"s,
   *    and the 'virtual' ids of GPU devices (i.e. the ids in the device
   *    name "/device:GPU:<id>") are also called "TF GPU id"s. Please
   *    refer to third_party/tensorflow/core/common_runtime/gpu/gpu_id.h
   *    for more information.
   * 
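    * Illustrative usage (editor's sketch): reproducing the "5,3" example
    * from the comment above, mapping visible GPU 5 to /device:GPU:0 and
    * visible GPU 3 to /device:GPU:1.
    *
    *   GPUOptions opts = GPUOptions.newBuilder()
    *       .setVisibleDeviceList("5,3")
    *       .build();
    *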
    *
    * string visible_device_list = 5;
    */
  public java.lang.String getVisibleDeviceList() {
    java.lang.Object ref = visibleDeviceList_;
    if (ref instanceof java.lang.String) {
      return (java.lang.String) ref;
    } else {
      com.google.protobuf.ByteString bs =
          (com.google.protobuf.ByteString) ref;
      java.lang.String s = bs.toStringUtf8();
      visibleDeviceList_ = s;
      return s;
    }
  }
  /**
    *
   * A comma-separated list of GPU ids that determines the 'visible'
   * to 'virtual' mapping of GPU devices.  For example, if TensorFlow
   * can see 8 GPU devices in the process, and one wanted to map
   * visible GPU devices 5 and 3 as "/device:GPU:0", and "/device:GPU:1",
   * then one would specify this field as "5,3".  This field is similar in
   * spirit to the CUDA_VISIBLE_DEVICES environment variable, except
   * it applies to the visible GPU devices in the process.
   * NOTE:
   * 1. The GPU driver provides the process with the visible GPUs
   *    in an order which is not guaranteed to have any correlation to
   *    the *physical* GPU id in the machine.  This field is used for
   *    remapping "visible" to "virtual", which means this operates only
   *    after the process starts.  Users are required to use vendor
   *    specific mechanisms (e.g., CUDA_VISIBLE_DEVICES) to control the
   *    physical to visible device mapping prior to invoking TensorFlow.
   * 2. In the code, the ids in this list are also called "platform GPU id"s,
   *    and the 'virtual' ids of GPU devices (i.e. the ids in the device
   *    name "/device:GPU:<id>") are also called "TF GPU id"s. Please
   *    refer to third_party/tensorflow/core/common_runtime/gpu/gpu_id.h
   *    for more information.
   * 
    *
    * string visible_device_list = 5;
    */
  public com.google.protobuf.ByteString getVisibleDeviceListBytes() {
    java.lang.Object ref = visibleDeviceList_;
    if (ref instanceof java.lang.String) {
      com.google.protobuf.ByteString b =
          com.google.protobuf.ByteString.copyFromUtf8(
              (java.lang.String) ref);
      visibleDeviceList_ = b;
      return b;
    } else {
      return (com.google.protobuf.ByteString) ref;
    }
  }

  public static final int POLLING_ACTIVE_DELAY_USECS_FIELD_NUMBER = 6;
  private int pollingActiveDelayUsecs_;
  /**
    *
    * In the event polling loop, sleep this many microseconds between
    * PollEvents calls when the queue is not empty.  If the value is not
    * set, or is set to 0, it is replaced with a non-zero default.
   * 
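    * Illustrative usage (editor's sketch): a 10-microsecond polling sleep;
    * the value is an arbitrary example, and 0 keeps the non-zero default.
    *
    *   GPUOptions opts = GPUOptions.newBuilder()
    *       .setPollingActiveDelayUsecs(10)
    *       .build();
    *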
    *
    * int32 polling_active_delay_usecs = 6;
    */
  public int getPollingActiveDelayUsecs() {
    return pollingActiveDelayUsecs_;
  }

  public static final int POLLING_INACTIVE_DELAY_MSECS_FIELD_NUMBER = 7;
  private int pollingInactiveDelayMsecs_;
  /**
    *
   * This field is deprecated and ignored.
   * 
    *
    * int32 polling_inactive_delay_msecs = 7;
    */
  public int getPollingInactiveDelayMsecs() {
    return pollingInactiveDelayMsecs_;
  }

  public static final int FORCE_GPU_COMPATIBLE_FIELD_NUMBER = 8;
  private boolean forceGpuCompatible_;
  /**
    *
    * Force all tensors to be gpu_compatible. On a GPU-enabled TensorFlow,
    * enabling this option forces all CPU tensors to be allocated with CUDA
    * pinned memory. Normally, TensorFlow will infer which tensors should be
    * allocated as pinned memory. But in cases where the inference is
    * incomplete, this option can significantly speed up cross-device memory
    * copies, as long as the data fits in memory.
    * Note that this option should not be enabled by default for unknown or
    * very large models, since all CUDA pinned memory is unpageable; having
    * too much pinned memory might negatively impact overall host system
    * performance.
   * 
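    * Illustrative usage (editor's sketch): opting into pinned CPU memory
    * for a model known to fit comfortably in host RAM.
    *
    *   GPUOptions opts = GPUOptions.newBuilder()
    *       .setForceGpuCompatible(true)
    *       .build();
    *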
    *
    * bool force_gpu_compatible = 8;
    */
  public boolean getForceGpuCompatible() {
    return forceGpuCompatible_;
  }

  public static final int EXPERIMENTAL_FIELD_NUMBER = 9;
  private org.tensorflow.framework.GPUOptions.Experimental experimental_;
  /**
    *
   * Everything inside experimental is subject to change and is not subject
   * to API stability guarantees in
   * https://www.tensorflow.org/guide/version_compat.
   * 
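    * Illustrative usage (editor's sketch): attaching an Experimental
    * sub-message; the kernel-tracker value is an arbitrary example.
    *
    *   GPUOptions opts = GPUOptions.newBuilder()
    *       .setExperimental(GPUOptions.Experimental.newBuilder()
    *           .setKernelTrackerMaxPending(4))
    *       .build();
    *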
    *
    * .tensorflow.GPUOptions.Experimental experimental = 9;
    */
  public boolean hasExperimental() {
    return experimental_ != null;
  }
  /**
    *
   * Everything inside experimental is subject to change and is not subject
   * to API stability guarantees in
   * https://www.tensorflow.org/guide/version_compat.
   * 
    *
    * .tensorflow.GPUOptions.Experimental experimental = 9;
    */
  public org.tensorflow.framework.GPUOptions.Experimental getExperimental() {
    return experimental_ == null
        ? org.tensorflow.framework.GPUOptions.Experimental.getDefaultInstance()
        : experimental_;
  }
  /**
    *
   * Everything inside experimental is subject to change and is not subject
   * to API stability guarantees in
   * https://www.tensorflow.org/guide/version_compat.
   * 
    *
    * .tensorflow.GPUOptions.Experimental experimental = 9;
    */
  public org.tensorflow.framework.GPUOptions.ExperimentalOrBuilder getExperimentalOrBuilder() {
    return getExperimental();
  }

  private byte memoizedIsInitialized = -1;
  public final boolean isInitialized() {
    byte isInitialized = memoizedIsInitialized;
    if (isInitialized == 1) return true;
    if (isInitialized == 0) return false;

    memoizedIsInitialized = 1;
    return true;
  }

  public void writeTo(com.google.protobuf.CodedOutputStream output)
                      throws java.io.IOException {
    if (perProcessGpuMemoryFraction_ != 0D) {
      output.writeDouble(1, perProcessGpuMemoryFraction_);
    }
    if (!getAllocatorTypeBytes().isEmpty()) {
      com.google.protobuf.GeneratedMessageV3.writeString(output, 2, allocatorType_);
    }
    if (deferredDeletionBytes_ != 0L) {
      output.writeInt64(3, deferredDeletionBytes_);
    }
    if (allowGrowth_ != false) {
      output.writeBool(4, allowGrowth_);
    }
    if (!getVisibleDeviceListBytes().isEmpty()) {
      com.google.protobuf.GeneratedMessageV3.writeString(output, 5, visibleDeviceList_);
    }
    if (pollingActiveDelayUsecs_ != 0) {
      output.writeInt32(6, pollingActiveDelayUsecs_);
    }
    if (pollingInactiveDelayMsecs_ != 0) {
      output.writeInt32(7, pollingInactiveDelayMsecs_);
    }
    if (forceGpuCompatible_ != false) {
      output.writeBool(8, forceGpuCompatible_);
    }
    if (experimental_ != null) {
      output.writeMessage(9, getExperimental());
    }
    unknownFields.writeTo(output);
  }

  public int getSerializedSize() {
    int size = memoizedSize;
    if (size != -1) return size;

    size = 0;
    if (perProcessGpuMemoryFraction_ != 0D) {
      size += com.google.protobuf.CodedOutputStream
          .computeDoubleSize(1, perProcessGpuMemoryFraction_);
    }
    if (!getAllocatorTypeBytes().isEmpty()) {
      size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, allocatorType_);
    }
    if (deferredDeletionBytes_ != 0L) {
      size += com.google.protobuf.CodedOutputStream
          .computeInt64Size(3, deferredDeletionBytes_);
    }
    if (allowGrowth_ != false) {
      size += com.google.protobuf.CodedOutputStream
          .computeBoolSize(4, allowGrowth_);
    }
    if (!getVisibleDeviceListBytes().isEmpty()) {
      size += com.google.protobuf.GeneratedMessageV3.computeStringSize(5, visibleDeviceList_);
    }
    if (pollingActiveDelayUsecs_ != 0) {
      size += com.google.protobuf.CodedOutputStream
          .computeInt32Size(6, pollingActiveDelayUsecs_);
    }
    if (pollingInactiveDelayMsecs_ != 0) {
      size += com.google.protobuf.CodedOutputStream
          .computeInt32Size(7, pollingInactiveDelayMsecs_);
    }
    if (forceGpuCompatible_ != false) {
      size += com.google.protobuf.CodedOutputStream
          .computeBoolSize(8, forceGpuCompatible_);
    }
    if (experimental_ != null) {
      size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(9, getExperimental());
    }
    size += unknownFields.getSerializedSize();
    memoizedSize = size;
    return size;
  }

  @java.lang.Override
  public boolean equals(final java.lang.Object obj) {
    if (obj == this) {
      return true;
    }
    if (!(obj instanceof org.tensorflow.framework.GPUOptions)) {
      return super.equals(obj);
    }
    org.tensorflow.framework.GPUOptions other = (org.tensorflow.framework.GPUOptions) obj;

    boolean result = true;
    result = result && (
        java.lang.Double.doubleToLongBits(getPerProcessGpuMemoryFraction())
        == java.lang.Double.doubleToLongBits(
            other.getPerProcessGpuMemoryFraction()));
    result = result && (getAllowGrowth()
        == other.getAllowGrowth());
    result = result && getAllocatorType()
        .equals(other.getAllocatorType());
    result = result && (getDeferredDeletionBytes()
        == other.getDeferredDeletionBytes());
    result = result && getVisibleDeviceList()
        .equals(other.getVisibleDeviceList());
    result = result && (getPollingActiveDelayUsecs()
        == other.getPollingActiveDelayUsecs());
    result = result && (getPollingInactiveDelayMsecs()
        == other.getPollingInactiveDelayMsecs());
    result = result && (getForceGpuCompatible()
        == other.getForceGpuCompatible());
    result = result && (hasExperimental() == other.hasExperimental());
    if (hasExperimental()) {
      result = result && getExperimental()
          .equals(other.getExperimental());
    }
    result = result && unknownFields.equals(other.unknownFields);
    return result;
  }

  @java.lang.Override
  public int hashCode() {
    if (memoizedHashCode != 0) {
      return memoizedHashCode;
    }
    int hash = 41;
    hash = (19 * hash) + getDescriptor().hashCode();
    hash = (37 * hash) + PER_PROCESS_GPU_MEMORY_FRACTION_FIELD_NUMBER;
    hash = (53 * hash) + com.google.protobuf.Internal.hashLong(
        java.lang.Double.doubleToLongBits(getPerProcessGpuMemoryFraction()));
    hash = (37 * hash) + ALLOW_GROWTH_FIELD_NUMBER;
    hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(
        getAllowGrowth());
    hash = (37 * hash) + ALLOCATOR_TYPE_FIELD_NUMBER;
    hash = (53 * hash) + getAllocatorType().hashCode();
    hash = (37 * hash) + DEFERRED_DELETION_BYTES_FIELD_NUMBER;
    hash = (53 * hash) + com.google.protobuf.Internal.hashLong(
        getDeferredDeletionBytes());
    hash = (37 * hash) + VISIBLE_DEVICE_LIST_FIELD_NUMBER;
    hash = (53 * hash) + getVisibleDeviceList().hashCode();
    hash = (37 * hash) + POLLING_ACTIVE_DELAY_USECS_FIELD_NUMBER;
    hash = (53 * hash) + getPollingActiveDelayUsecs();
    hash = (37 * hash) + POLLING_INACTIVE_DELAY_MSECS_FIELD_NUMBER;
    hash = (53 * hash) + getPollingInactiveDelayMsecs();
    hash = (37 * hash) + FORCE_GPU_COMPATIBLE_FIELD_NUMBER;
    hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(
        getForceGpuCompatible());
    if (hasExperimental()) {
      hash = (37 * hash) + EXPERIMENTAL_FIELD_NUMBER;
      hash = (53 * hash) + getExperimental().hashCode();
    }
    hash = (29 * hash) + unknownFields.hashCode();
    memoizedHashCode = hash;
    return hash;
  }

  public static org.tensorflow.framework.GPUOptions parseFrom(
      java.nio.ByteBuffer data)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data);
  }
  public static org.tensorflow.framework.GPUOptions parseFrom(
      java.nio.ByteBuffer data,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data, extensionRegistry);
  }
  public static org.tensorflow.framework.GPUOptions parseFrom(
      com.google.protobuf.ByteString data)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data);
  }
  public static org.tensorflow.framework.GPUOptions parseFrom(
      com.google.protobuf.ByteString data,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data, extensionRegistry);
  }
  public static org.tensorflow.framework.GPUOptions parseFrom(byte[] data)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data);
  }
  public static org.tensorflow.framework.GPUOptions parseFrom(
      byte[] data,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data, extensionRegistry);
  }
  public static org.tensorflow.framework.GPUOptions parseFrom(java.io.InputStream input)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3
        .parseWithIOException(PARSER, input);
  }
  public static org.tensorflow.framework.GPUOptions parseFrom(
      java.io.InputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3
        .parseWithIOException(PARSER, input, extensionRegistry);
  }
  public static org.tensorflow.framework.GPUOptions parseDelimitedFrom(java.io.InputStream input)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3
        .parseDelimitedWithIOException(PARSER, input);
  }
  public static org.tensorflow.framework.GPUOptions parseDelimitedFrom(
      java.io.InputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3
        .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
  }
  public static org.tensorflow.framework.GPUOptions parseFrom(
      com.google.protobuf.CodedInputStream input)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3
        .parseWithIOException(PARSER, input);
  }
  public static org.tensorflow.framework.GPUOptions parseFrom(
      com.google.protobuf.CodedInputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3
        .parseWithIOException(PARSER, input, extensionRegistry);
  }

  public Builder newBuilderForType() { return newBuilder(); }
  public static Builder newBuilder() {
    return DEFAULT_INSTANCE.toBuilder();
  }
  public static Builder newBuilder(org.tensorflow.framework.GPUOptions prototype) {
    return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
  }
  public Builder toBuilder() {
    return this == DEFAULT_INSTANCE
        ? new Builder() : new Builder().mergeFrom(this);
  }

  @java.lang.Override
  protected Builder newBuilderForType(
      com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
    Builder builder = new Builder(parent);
    return builder;
  }
  /**
   * Protobuf type {@code tensorflow.GPUOptions}
   */
  public static final class Builder extends
      com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
      // @@protoc_insertion_point(builder_implements:tensorflow.GPUOptions)
      org.tensorflow.framework.GPUOptionsOrBuilder {
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_GPUOptions_descriptor;
    }

    protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_GPUOptions_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.tensorflow.framework.GPUOptions.class, org.tensorflow.framework.GPUOptions.Builder.class);
    }

    // Construct using org.tensorflow.framework.GPUOptions.newBuilder()
    private Builder() {
      maybeForceBuilderInitialization();
    }

    private Builder(
        com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
      super(parent);
      maybeForceBuilderInitialization();
    }
    private void maybeForceBuilderInitialization() {
      if (com.google.protobuf.GeneratedMessageV3
              .alwaysUseFieldBuilders) {
      }
    }
    public Builder clear() {
      super.clear();
      perProcessGpuMemoryFraction_ = 0D;
      allowGrowth_ = false;
      allocatorType_ = "";
      deferredDeletionBytes_ = 0L;
      visibleDeviceList_ = "";
      pollingActiveDelayUsecs_ = 0;
      pollingInactiveDelayMsecs_ = 0;
      forceGpuCompatible_ = false;
      if (experimentalBuilder_ == null) {
        experimental_ = null;
      } else {
        experimental_ = null;
        experimentalBuilder_ = null;
      }
      return this;
    }

    public com.google.protobuf.Descriptors.Descriptor
        getDescriptorForType() {
      return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_GPUOptions_descriptor;
    }

    public org.tensorflow.framework.GPUOptions getDefaultInstanceForType() {
      return org.tensorflow.framework.GPUOptions.getDefaultInstance();
    }

    public org.tensorflow.framework.GPUOptions build() {
      org.tensorflow.framework.GPUOptions result = buildPartial();
      if (!result.isInitialized()) {
        throw newUninitializedMessageException(result);
      }
      return result;
    }

    public org.tensorflow.framework.GPUOptions buildPartial() {
      org.tensorflow.framework.GPUOptions result = new org.tensorflow.framework.GPUOptions(this);
      result.perProcessGpuMemoryFraction_ = perProcessGpuMemoryFraction_;
      result.allowGrowth_ = allowGrowth_;
      result.allocatorType_ = allocatorType_;
      result.deferredDeletionBytes_ = deferredDeletionBytes_;
      result.visibleDeviceList_ = visibleDeviceList_;
      result.pollingActiveDelayUsecs_ = pollingActiveDelayUsecs_;
      result.pollingInactiveDelayMsecs_ = pollingInactiveDelayMsecs_;
      result.forceGpuCompatible_ = forceGpuCompatible_;
      if (experimentalBuilder_ == null) {
        result.experimental_ = experimental_;
      } else {
        result.experimental_ = experimentalBuilder_.build();
      }
      onBuilt();
      return result;
    }

    public Builder clone() {
      return (Builder) super.clone();
    }
    public Builder setField(
        com.google.protobuf.Descriptors.FieldDescriptor field,
        java.lang.Object value) {
      return (Builder) super.setField(field, value);
    }
    public Builder clearField(
        com.google.protobuf.Descriptors.FieldDescriptor field) {
      return (Builder) super.clearField(field);
    }
    public Builder clearOneof(
        com.google.protobuf.Descriptors.OneofDescriptor oneof) {
      return (Builder) super.clearOneof(oneof);
    }
    public Builder setRepeatedField(
        com.google.protobuf.Descriptors.FieldDescriptor field,
        int index, java.lang.Object value) {
      return (Builder) super.setRepeatedField(field, index, value);
    }
    public Builder addRepeatedField(
        com.google.protobuf.Descriptors.FieldDescriptor field,
        java.lang.Object value) {
      return (Builder) super.addRepeatedField(field, value);
    }
    public Builder mergeFrom(com.google.protobuf.Message other) {
      if (other instanceof org.tensorflow.framework.GPUOptions) {
        return mergeFrom((org.tensorflow.framework.GPUOptions)other);
      } else {
        super.mergeFrom(other);
        return this;
      }
    }

    public Builder mergeFrom(org.tensorflow.framework.GPUOptions other) {
      if (other == org.tensorflow.framework.GPUOptions.getDefaultInstance()) return this;
      if (other.getPerProcessGpuMemoryFraction() != 0D) {
        setPerProcessGpuMemoryFraction(other.getPerProcessGpuMemoryFraction());
      }
      if (other.getAllowGrowth() != false) {
        setAllowGrowth(other.getAllowGrowth());
      }
      if (!other.getAllocatorType().isEmpty()) {
        allocatorType_ = other.allocatorType_;
        onChanged();
      }
      if (other.getDeferredDeletionBytes() != 0L) {
        setDeferredDeletionBytes(other.getDeferredDeletionBytes());
      }
      if (!other.getVisibleDeviceList().isEmpty()) {
        visibleDeviceList_ = other.visibleDeviceList_;
        onChanged();
      }
      if (other.getPollingActiveDelayUsecs() != 0) {
        setPollingActiveDelayUsecs(other.getPollingActiveDelayUsecs());
      }
      if (other.getPollingInactiveDelayMsecs() != 0) {
        setPollingInactiveDelayMsecs(other.getPollingInactiveDelayMsecs());
      }
      if (other.getForceGpuCompatible() != false) {
        setForceGpuCompatible(other.getForceGpuCompatible());
      }
      if (other.hasExperimental()) {
        mergeExperimental(other.getExperimental());
      }
      this.mergeUnknownFields(other.unknownFields);
      onChanged();
      return this;
    }

    public final boolean isInitialized() {
      return true;
    }

    public Builder mergeFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      org.tensorflow.framework.GPUOptions parsedMessage = null;
      try {
        parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        parsedMessage = (org.tensorflow.framework.GPUOptions) e.getUnfinishedMessage();
        throw e.unwrapIOException();
      } finally {
        if (parsedMessage != null) {
          mergeFrom(parsedMessage);
        }
      }
      return this;
    }

    private double perProcessGpuMemoryFraction_ ;
    /**
      *
     * Fraction of the available GPU memory to allocate for each process.
     * 1 means to allocate all of the GPU memory, 0.5 means the process
     * allocates up to ~50% of the available GPU memory.
     * GPU memory is pre-allocated unless the allow_growth option is enabled.
     * If greater than 1.0, uses CUDA unified memory to potentially oversubscribe
     * the amount of memory available on the GPU device by using host memory as a
     * swap space. Accessing memory not available on the device will be
     * significantly slower as that would require memory transfer between the host
     * and the device. Options to reduce the memory requirement should be
     * considered before enabling this option as this may come with a negative
     * performance impact. Oversubscription using the unified memory requires
     * Pascal class or newer GPUs and it is currently only supported on the Linux
     * operating system. See
     * https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#um-requirements
     * for the detailed requirements.
     * 
      *
      * double per_process_gpu_memory_fraction = 1;
      */
    public double getPerProcessGpuMemoryFraction() {
      return perProcessGpuMemoryFraction_;
    }
    /**
      *
     * Fraction of the available GPU memory to allocate for each process.
     * 1 means to allocate all of the GPU memory, 0.5 means the process
     * allocates up to ~50% of the available GPU memory.
     * GPU memory is pre-allocated unless the allow_growth option is enabled.
     * If greater than 1.0, uses CUDA unified memory to potentially oversubscribe
     * the amount of memory available on the GPU device by using host memory as a
     * swap space. Accessing memory not available on the device will be
     * significantly slower as that would require memory transfer between the host
     * and the device. Options to reduce the memory requirement should be
     * considered before enabling this option as this may come with a negative
     * performance impact. Oversubscription using the unified memory requires
     * Pascal class or newer GPUs and it is currently only supported on the Linux
     * operating system. See
     * https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#um-requirements
     * for the detailed requirements.
     * 
      *
      * double per_process_gpu_memory_fraction = 1;
      */
    public Builder setPerProcessGpuMemoryFraction(double value) {
      perProcessGpuMemoryFraction_ = value;
      onChanged();
      return this;
    }
    /**
      *
     * Fraction of the available GPU memory to allocate for each process.
     * 1 means to allocate all of the GPU memory, 0.5 means the process
     * allocates up to ~50% of the available GPU memory.
     * GPU memory is pre-allocated unless the allow_growth option is enabled.
     * If greater than 1.0, uses CUDA unified memory to potentially oversubscribe
     * the amount of memory available on the GPU device by using host memory as a
     * swap space. Accessing memory not available on the device will be
     * significantly slower as that would require memory transfer between the host
     * and the device. Options to reduce the memory requirement should be
     * considered before enabling this option as this may come with a negative
     * performance impact. Oversubscription using the unified memory requires
     * Pascal class or newer GPUs and it is currently only supported on the Linux
     * operating system. See
     * https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#um-requirements
     * for the detailed requirements.
     * 
      *
      * double per_process_gpu_memory_fraction = 1;
      */
    public Builder clearPerProcessGpuMemoryFraction() {
      perProcessGpuMemoryFraction_ = 0D;
      onChanged();
      return this;
    }

    private boolean allowGrowth_ ;
    /**
      *
     * If true, the allocator does not pre-allocate the entire specified
     * GPU memory region, instead starting small and growing as needed.
     * 
      *
      * bool allow_growth = 4;
      */
    public boolean getAllowGrowth() {
      return allowGrowth_;
    }
    /**
      *
     * If true, the allocator does not pre-allocate the entire specified
     * GPU memory region, instead starting small and growing as needed.
     * 
      *
      * bool allow_growth = 4;
      */
    public Builder setAllowGrowth(boolean value) {
      allowGrowth_ = value;
      onChanged();
      return this;
    }
    /**
      *
     * If true, the allocator does not pre-allocate the entire specified
     * GPU memory region, instead starting small and growing as needed.
     * 
      *
      * bool allow_growth = 4;
      */
    public Builder clearAllowGrowth() {
      allowGrowth_ = false;
      onChanged();
      return this;
    }

    private java.lang.Object allocatorType_ = "";
    /**
      *
     * The type of GPU allocation strategy to use.
     * Allowed values:
     * "": The empty string (default) uses a system-chosen default
     *     which may change over time.
     * "BFC": A "Best-fit with coalescing" algorithm, simplified from a
     *        version of dlmalloc.
     * 
      *
      * string allocator_type = 2;
      */
    public java.lang.String getAllocatorType() {
      java.lang.Object ref = allocatorType_;
      if (!(ref instanceof java.lang.String)) {
        com.google.protobuf.ByteString bs =
            (com.google.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        allocatorType_ = s;
        return s;
      } else {
        return (java.lang.String) ref;
      }
    }
    /**
      *
     * The type of GPU allocation strategy to use.
     * Allowed values:
     * "": The empty string (default) uses a system-chosen default
     *     which may change over time.
     * "BFC": A "Best-fit with coalescing" algorithm, simplified from a
     *        version of dlmalloc.
     * 
      *
      * string allocator_type = 2;
      */
    public com.google.protobuf.ByteString getAllocatorTypeBytes() {
      java.lang.Object ref = allocatorType_;
      if (ref instanceof String) {
        com.google.protobuf.ByteString b =
            com.google.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        allocatorType_ = b;
        return b;
      } else {
        return (com.google.protobuf.ByteString) ref;
      }
    }
    /**
      *
     * The type of GPU allocation strategy to use.
     * Allowed values:
     * "": The empty string (default) uses a system-chosen default
     *     which may change over time.
     * "BFC": A "Best-fit with coalescing" algorithm, simplified from a
     *        version of dlmalloc.
     * 
      *
      * string allocator_type = 2;
      */
    public Builder setAllocatorType(
        java.lang.String value) {
      if (value == null) {
        throw new NullPointerException();
      }
      allocatorType_ = value;
      onChanged();
      return this;
    }
    /**
      *
     * The type of GPU allocation strategy to use.
     * Allowed values:
     * "": The empty string (default) uses a system-chosen default
     *     which may change over time.
     * "BFC": A "Best-fit with coalescing" algorithm, simplified from a
     *        version of dlmalloc.
     * 
      *
      * string allocator_type = 2;
      */
    public Builder clearAllocatorType() {
      allocatorType_ = getDefaultInstance().getAllocatorType();
      onChanged();
      return this;
    }
    /**
      *
     * The type of GPU allocation strategy to use.
     * Allowed values:
     * "": The empty string (default) uses a system-chosen default
     *     which may change over time.
     * "BFC": A "Best-fit with coalescing" algorithm, simplified from a
     *        version of dlmalloc.
     * 
      *
      * string allocator_type = 2;
      */
    public Builder setAllocatorTypeBytes(
        com.google.protobuf.ByteString value) {
      if (value == null) {
        throw new NullPointerException();
      }
      checkByteStringIsUtf8(value);
      allocatorType_ = value;
      onChanged();
      return this;
    }

    private long deferredDeletionBytes_ ;
    /**
      *
     * Delay deletion of up to this many bytes to reduce the number of
     * interactions with gpu driver code.  If 0, the system chooses
     * a reasonable default (several MBs).
     * 
      *
      * int64 deferred_deletion_bytes = 3;
      */
    public long getDeferredDeletionBytes() {
      return deferredDeletionBytes_;
    }
    /**
      *
     * Delay deletion of up to this many bytes to reduce the number of
     * interactions with gpu driver code.  If 0, the system chooses
     * a reasonable default (several MBs).
     * 
      *
      * int64 deferred_deletion_bytes = 3;
      */
    public Builder setDeferredDeletionBytes(long value) {
      deferredDeletionBytes_ = value;
      onChanged();
      return this;
    }
    /**
      *
     * Delay deletion of up to this many bytes to reduce the number of
     * interactions with gpu driver code.  If 0, the system chooses
     * a reasonable default (several MBs).
     * 
      *
      * int64 deferred_deletion_bytes = 3;
      */
    public Builder clearDeferredDeletionBytes() {
      deferredDeletionBytes_ = 0L;
      onChanged();
      return this;
    }

    private java.lang.Object visibleDeviceList_ = "";
    /**
      *
     * A comma-separated list of GPU ids that determines the 'visible'
     * to 'virtual' mapping of GPU devices.  For example, if TensorFlow
     * can see 8 GPU devices in the process, and one wanted to map
     * visible GPU devices 5 and 3 as "/device:GPU:0", and "/device:GPU:1",
     * then one would specify this field as "5,3".  This field is similar in
     * spirit to the CUDA_VISIBLE_DEVICES environment variable, except
     * it applies to the visible GPU devices in the process.
     * NOTE:
     * 1. The GPU driver provides the process with the visible GPUs
     *    in an order which is not guaranteed to have any correlation to
     *    the *physical* GPU id in the machine.  This field is used for
     *    remapping "visible" to "virtual", which means this operates only
     *    after the process starts.  Users are required to use vendor
     *    specific mechanisms (e.g., CUDA_VISIBLE_DEVICES) to control the
     *    physical to visible device mapping prior to invoking TensorFlow.
     * 2. In the code, the ids in this list are also called "platform GPU id"s,
     *    and the 'virtual' ids of GPU devices (i.e. the ids in the device
     *    name "/device:GPU:<id>") are also called "TF GPU id"s. Please
     *    refer to third_party/tensorflow/core/common_runtime/gpu/gpu_id.h
     *    for more information.
     * 
      *
      * string visible_device_list = 5;
      */
    public java.lang.String getVisibleDeviceList() {
      java.lang.Object ref = visibleDeviceList_;
      if (!(ref instanceof java.lang.String)) {
        com.google.protobuf.ByteString bs =
            (com.google.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        visibleDeviceList_ = s;
        return s;
      } else {
        return (java.lang.String) ref;
      }
    }
    /**
      *
     * A comma-separated list of GPU ids that determines the 'visible'
     * to 'virtual' mapping of GPU devices.  For example, if TensorFlow
     * can see 8 GPU devices in the process, and one wanted to map
     * visible GPU devices 5 and 3 as "/device:GPU:0", and "/device:GPU:1",
     * then one would specify this field as "5,3".  This field is similar in
     * spirit to the CUDA_VISIBLE_DEVICES environment variable, except
     * it applies to the visible GPU devices in the process.
     * NOTE:
     * 1. The GPU driver provides the process with the visible GPUs
     *    in an order which is not guaranteed to have any correlation to
     *    the *physical* GPU id in the machine.  This field is used for
     *    remapping "visible" to "virtual", which means this operates only
     *    after the process starts.  Users are required to use vendor
     *    specific mechanisms (e.g., CUDA_VISIBLE_DEVICES) to control the
     *    physical to visible device mapping prior to invoking TensorFlow.
     * 2. In the code, the ids in this list are also called "platform GPU id"s,
     *    and the 'virtual' ids of GPU devices (i.e. the ids in the device
     *    name "/device:GPU:<id>") are also called "TF GPU id"s. Please
     *    refer to third_party/tensorflow/core/common_runtime/gpu/gpu_id.h
     *    for more information.
     * 
      *
      * string visible_device_list = 5;
      */
    public com.google.protobuf.ByteString getVisibleDeviceListBytes() {
      java.lang.Object ref = visibleDeviceList_;
      if (ref instanceof String) {
        com.google.protobuf.ByteString b =
            com.google.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        visibleDeviceList_ = b;
        return b;
      } else {
        return (com.google.protobuf.ByteString) ref;
      }
    }
    /**
      *
     * A comma-separated list of GPU ids that determines the 'visible'
     * to 'virtual' mapping of GPU devices.  For example, if TensorFlow
     * can see 8 GPU devices in the process, and one wanted to map
     * visible GPU devices 5 and 3 as "/device:GPU:0", and "/device:GPU:1",
     * then one would specify this field as "5,3".  This field is similar in
     * spirit to the CUDA_VISIBLE_DEVICES environment variable, except
     * it applies to the visible GPU devices in the process.
     * NOTE:
     * 1. The GPU driver provides the process with the visible GPUs
     *    in an order which is not guaranteed to have any correlation to
     *    the *physical* GPU id in the machine.  This field is used for
     *    remapping "visible" to "virtual", which means this operates only
     *    after the process starts.  Users are required to use vendor
     *    specific mechanisms (e.g., CUDA_VISIBLE_DEVICES) to control the
     *    physical to visible device mapping prior to invoking TensorFlow.
     * 2. In the code, the ids in this list are also called "platform GPU id"s,
     *    and the 'virtual' ids of GPU devices (i.e. the ids in the device
     *    name "/device:GPU:<id>") are also called "TF GPU id"s. Please
     *    refer to third_party/tensorflow/core/common_runtime/gpu/gpu_id.h
     *    for more information.
     * 
      *
      * string visible_device_list = 5;
      */
    public Builder setVisibleDeviceList(
        java.lang.String value) {
      if (value == null) {
        throw new NullPointerException();
      }
      visibleDeviceList_ = value;
      onChanged();
      return this;
    }
    /**
      *
     * A comma-separated list of GPU ids that determines the 'visible'
     * to 'virtual' mapping of GPU devices.  For example, if TensorFlow
     * can see 8 GPU devices in the process, and one wanted to map
     * visible GPU devices 5 and 3 as "/device:GPU:0", and "/device:GPU:1",
     * then one would specify this field as "5,3".  This field is similar in
     * spirit to the CUDA_VISIBLE_DEVICES environment variable, except
     * it applies to the visible GPU devices in the process.
     * NOTE:
     * 1. The GPU driver provides the process with the visible GPUs
     *    in an order which is not guaranteed to have any correlation to
     *    the *physical* GPU id in the machine.  This field is used for
     *    remapping "visible" to "virtual", which means this operates only
     *    after the process starts.  Users are required to use vendor
     *    specific mechanisms (e.g., CUDA_VISIBLE_DEVICES) to control the
     *    physical to visible device mapping prior to invoking TensorFlow.
     * 2. In the code, the ids in this list are also called "platform GPU id"s,
     *    and the 'virtual' ids of GPU devices (i.e. the ids in the device
     *    name "/device:GPU:<id>") are also called "TF GPU id"s. Please
     *    refer to third_party/tensorflow/core/common_runtime/gpu/gpu_id.h
     *    for more information.
     * 
      *
      * string visible_device_list = 5;
      */
    public Builder clearVisibleDeviceList() {
      visibleDeviceList_ = getDefaultInstance().getVisibleDeviceList();
      onChanged();
      return this;
    }
    /**
      *
     * A comma-separated list of GPU ids that determines the 'visible'
     * to 'virtual' mapping of GPU devices.  For example, if TensorFlow
     * can see 8 GPU devices in the process, and one wanted to map
     * visible GPU devices 5 and 3 as "/device:GPU:0", and "/device:GPU:1",
     * then one would specify this field as "5,3".  This field is similar in
     * spirit to the CUDA_VISIBLE_DEVICES environment variable, except
     * it applies to the visible GPU devices in the process.
     * NOTE:
     * 1. The GPU driver provides the process with the visible GPUs
     *    in an order which is not guaranteed to have any correlation to
     *    the *physical* GPU id in the machine.  This field is used for
     *    remapping "visible" to "virtual", which means this operates only
     *    after the process starts.  Users are required to use vendor
     *    specific mechanisms (e.g., CUDA_VISIBLE_DEVICES) to control the
     *    physical to visible device mapping prior to invoking TensorFlow.
     * 2. In the code, the ids in this list are also called "platform GPU id"s,
     *    and the 'virtual' ids of GPU devices (i.e. the ids in the device
     *    name "/device:GPU:<id>") are also called "TF GPU id"s. Please
     *    refer to third_party/tensorflow/core/common_runtime/gpu/gpu_id.h
     *    for more information.
     * 
      *
      * string visible_device_list = 5;
      */
    public Builder setVisibleDeviceListBytes(
        com.google.protobuf.ByteString value) {
      if (value == null) {
        throw new NullPointerException();
      }
      checkByteStringIsUtf8(value);
      visibleDeviceList_ = value;
      onChanged();
      return this;
    }

    private int pollingActiveDelayUsecs_ ;
    /**
      *
      * In the event polling loop, sleep this many microseconds between
      * PollEvents calls when the queue is not empty.  If the value is not
      * set, or is set to 0, it is replaced with a non-zero default.
     * 
      *
      * int32 polling_active_delay_usecs = 6;
      */
    public int getPollingActiveDelayUsecs() {
      return pollingActiveDelayUsecs_;
    }
    /**
      *
      * In the event polling loop, sleep this many microseconds between
      * PollEvents calls when the queue is not empty.  If the value is not
      * set, or is set to 0, it is replaced with a non-zero default.
     * 
      *
      * int32 polling_active_delay_usecs = 6;
      */
    public Builder setPollingActiveDelayUsecs(int value) {
      pollingActiveDelayUsecs_ = value;
      onChanged();
      return this;
    }
    /**
      *
      * In the event polling loop, sleep this many microseconds between
      * PollEvents calls when the queue is not empty.  If the value is not
      * set, or is set to 0, it is replaced with a non-zero default.
     * 
      *
      * int32 polling_active_delay_usecs = 6;
      */
    public Builder clearPollingActiveDelayUsecs() {
      pollingActiveDelayUsecs_ = 0;
      onChanged();
      return this;
    }

    private int pollingInactiveDelayMsecs_ ;
    /**
      *
     * This field is deprecated and ignored.
     * 
      *
      * int32 polling_inactive_delay_msecs = 7;
      */
    public int getPollingInactiveDelayMsecs() {
      return pollingInactiveDelayMsecs_;
    }
    /**
      *
     * This field is deprecated and ignored.
     * 
      *
      * int32 polling_inactive_delay_msecs = 7;
      */
    public Builder setPollingInactiveDelayMsecs(int value) {
      pollingInactiveDelayMsecs_ = value;
      onChanged();
      return this;
    }
    /**
      *
     * This field is deprecated and ignored.
     * 
      *
      * int32 polling_inactive_delay_msecs = 7;
      */
    public Builder clearPollingInactiveDelayMsecs() {
      pollingInactiveDelayMsecs_ = 0;
      onChanged();
      return this;
    }

    private boolean forceGpuCompatible_ ;
    /**
      *
      * Force all tensors to be gpu_compatible. On a GPU-enabled TensorFlow,
      * enabling this option forces all CPU tensors to be allocated with CUDA
      * pinned memory. Normally, TensorFlow will infer which tensors should be
      * allocated as pinned memory. But in cases where the inference is
      * incomplete, this option can significantly speed up cross-device memory
      * copies, as long as the data fits in memory.
      * Note that this option should not be enabled by default for unknown or
      * very large models, since all CUDA pinned memory is unpageable; having
      * too much pinned memory might negatively impact overall host system
      * performance.
     * 
      *
      * bool force_gpu_compatible = 8;
      */
    public boolean getForceGpuCompatible() {
      return forceGpuCompatible_;
    }
    /**
      *
      * Force all tensors to be gpu_compatible. On a GPU-enabled TensorFlow,
      * enabling this option forces all CPU tensors to be allocated with CUDA
      * pinned memory. Normally, TensorFlow will infer which tensors should be
      * allocated as pinned memory. But in cases where the inference is
      * incomplete, this option can significantly speed up cross-device memory
      * copies, as long as the data fits in memory.
      * Note that this option should not be enabled by default for unknown or
      * very large models, since all CUDA pinned memory is unpageable; having
      * too much pinned memory might negatively impact overall host system
      * performance.
     * 
      *
      * bool force_gpu_compatible = 8;
      */
    public Builder setForceGpuCompatible(boolean value) {
      forceGpuCompatible_ = value;
      onChanged();
      return this;
    }
    /**
      *
      * Force all tensors to be gpu_compatible. On a GPU-enabled TensorFlow,
      * enabling this option forces all CPU tensors to be allocated with CUDA
      * pinned memory. Normally, TensorFlow will infer which tensors should be
      * allocated as pinned memory. But in cases where the inference is
      * incomplete, this option can significantly speed up cross-device memory
      * copies, as long as the data fits in memory.
      * Note that this option should not be enabled by default for unknown or
      * very large models, since all CUDA pinned memory is unpageable; having
      * too much pinned memory might negatively impact overall host system
      * performance.
     * 
      *
      * bool force_gpu_compatible = 8;
      */
    public Builder clearForceGpuCompatible() {
      forceGpuCompatible_ = false;
      onChanged();
      return this;
    }

    private org.tensorflow.framework.GPUOptions.Experimental experimental_ = null;
    private com.google.protobuf.SingleFieldBuilderV3<
        org.tensorflow.framework.GPUOptions.Experimental, org.tensorflow.framework.GPUOptions.Experimental.Builder, org.tensorflow.framework.GPUOptions.ExperimentalOrBuilder> experimentalBuilder_;
    /**
      *
     * Everything inside experimental is subject to change and is not subject
     * to API stability guarantees in
     * https://www.tensorflow.org/guide/version_compat.
     * 
      *
      * .tensorflow.GPUOptions.Experimental experimental = 9;
      */
    public boolean hasExperimental() {
      return experimentalBuilder_ != null || experimental_ != null;
    }
    /**
      *
     * Everything inside experimental is subject to change and is not subject
     * to API stability guarantees in
     * https://www.tensorflow.org/guide/version_compat.
     * 
      *
      * .tensorflow.GPUOptions.Experimental experimental = 9;
      */
    public org.tensorflow.framework.GPUOptions.Experimental getExperimental() {
      if (experimentalBuilder_ == null) {
        return experimental_ == null
            ? org.tensorflow.framework.GPUOptions.Experimental.getDefaultInstance()
            : experimental_;
      } else {
        return experimentalBuilder_.getMessage();
      }
    }
    /**
      *
     * Everything inside experimental is subject to change and is not subject
     * to API stability guarantees in
     * https://www.tensorflow.org/guide/version_compat.
     * 
      *
      * .tensorflow.GPUOptions.Experimental experimental = 9;
      */
    public Builder setExperimental(org.tensorflow.framework.GPUOptions.Experimental value) {
      if (experimentalBuilder_ == null) {
        if (value == null) {
          throw new NullPointerException();
        }
        experimental_ = value;
        onChanged();
      } else {
        experimentalBuilder_.setMessage(value);
      }
      return this;
    }
    /**
      *
     * Everything inside experimental is subject to change and is not subject
     * to API stability guarantees in
     * https://www.tensorflow.org/guide/version_compat.
     * 
      *
      * .tensorflow.GPUOptions.Experimental experimental = 9;
      */
    public Builder setExperimental(
        org.tensorflow.framework.GPUOptions.Experimental.Builder builderForValue) {
      if (experimentalBuilder_ == null) {
        experimental_ = builderForValue.build();
        onChanged();
      } else {
        experimentalBuilder_.setMessage(builderForValue.build());
      }
      return this;
    }
    /**
      *
     * Everything inside experimental is subject to change and is not subject
     * to API stability guarantees in
     * https://www.tensorflow.org/guide/version_compat.
     * 
      *
      * .tensorflow.GPUOptions.Experimental experimental = 9;
      */
    public Builder mergeExperimental(org.tensorflow.framework.GPUOptions.Experimental value) {
      if (experimentalBuilder_ == null) {
        if (experimental_ != null) {
          experimental_ =
              org.tensorflow.framework.GPUOptions.Experimental.newBuilder(experimental_).mergeFrom(value).buildPartial();
        } else {
          experimental_ = value;
        }
        onChanged();
      } else {
        experimentalBuilder_.mergeFrom(value);
      }
      return this;
    }
    /**
      *
     * Everything inside experimental is subject to change and is not subject
     * to API stability guarantees in
     * https://www.tensorflow.org/guide/version_compat.
     * 
      *
      * .tensorflow.GPUOptions.Experimental experimental = 9;
      */
    public Builder clearExperimental() {
      if (experimentalBuilder_ == null) {
        experimental_ = null;
        onChanged();
      } else {
        experimental_ = null;
        experimentalBuilder_ = null;
      }
      return this;
    }
    /**
      *
     * Everything inside experimental is subject to change and is not subject
     * to API stability guarantees in
     * https://www.tensorflow.org/guide/version_compat.
     * 
      *
      * .tensorflow.GPUOptions.Experimental experimental = 9;
      */
    public org.tensorflow.framework.GPUOptions.Experimental.Builder getExperimentalBuilder() {
      onChanged();
      return getExperimentalFieldBuilder().getBuilder();
    }
    /**
      *
     * Everything inside experimental is subject to change and is not subject
     * to API stability guarantees in
     * https://www.tensorflow.org/guide/version_compat.
     * 
      *
      * .tensorflow.GPUOptions.Experimental experimental = 9;
      */
    public org.tensorflow.framework.GPUOptions.ExperimentalOrBuilder getExperimentalOrBuilder() {
      if (experimentalBuilder_ != null) {
        return experimentalBuilder_.getMessageOrBuilder();
      } else {
        return experimental_ == null
            ? org.tensorflow.framework.GPUOptions.Experimental.getDefaultInstance()
            : experimental_;
      }
    }
    /**
      *
     * Everything inside experimental is subject to change and is not subject
     * to API stability guarantees in
     * https://www.tensorflow.org/guide/version_compat.
     * 
      *
      * .tensorflow.GPUOptions.Experimental experimental = 9;
      */
    private com.google.protobuf.SingleFieldBuilderV3<
        org.tensorflow.framework.GPUOptions.Experimental, org.tensorflow.framework.GPUOptions.Experimental.Builder, org.tensorflow.framework.GPUOptions.ExperimentalOrBuilder>
        getExperimentalFieldBuilder() {
      if (experimentalBuilder_ == null) {
        experimentalBuilder_ = new com.google.protobuf.SingleFieldBuilderV3<
            org.tensorflow.framework.GPUOptions.Experimental, org.tensorflow.framework.GPUOptions.Experimental.Builder, org.tensorflow.framework.GPUOptions.ExperimentalOrBuilder>(
                getExperimental(),
                getParentForChildren(),
                isClean());
        experimental_ = null;
      }
      return experimentalBuilder_;
    }

    public final Builder setUnknownFields(
        final com.google.protobuf.UnknownFieldSet unknownFields) {
      return super.setUnknownFieldsProto3(unknownFields);
    }

    public final Builder mergeUnknownFields(
        final com.google.protobuf.UnknownFieldSet unknownFields) {
      return super.mergeUnknownFields(unknownFields);
    }

    // @@protoc_insertion_point(builder_scope:tensorflow.GPUOptions)
  }

  // @@protoc_insertion_point(class_scope:tensorflow.GPUOptions)
  private static final org.tensorflow.framework.GPUOptions DEFAULT_INSTANCE;
  static {
    DEFAULT_INSTANCE = new org.tensorflow.framework.GPUOptions();
  }

  public static org.tensorflow.framework.GPUOptions getDefaultInstance() {
    return DEFAULT_INSTANCE;
  }

  private static final com.google.protobuf.Parser<GPUOptions>
      PARSER = new com.google.protobuf.AbstractParser<GPUOptions>() {
    public GPUOptions parsePartialFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return new GPUOptions(input, extensionRegistry);
    }
  };

  public static com.google.protobuf.Parser<GPUOptions> parser() {
    return PARSER;
  }

  @java.lang.Override
  public com.google.protobuf.Parser<GPUOptions> getParserForType() {
    return PARSER;
  }

  public org.tensorflow.framework.GPUOptions getDefaultInstanceForType() {
    return DEFAULT_INSTANCE;
  }
}
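// Editor's note — end-to-end usage sketch, not part of the generated file.
// It round-trips a GPUOptions message through the wire format using the
// parseFrom/toByteArray machinery above, then attaches it to a session
// config; org.tensorflow.framework.ConfigProto is assumed to be the sibling
// class generated from the same config.proto.
//
//   GPUOptions opts = GPUOptions.newBuilder()
//       .setPerProcessGpuMemoryFraction(0.4)   // arbitrary example values
//       .setAllowGrowth(true)
//       .setVisibleDeviceList("0")
//       .build();
//
//   byte[] wire = opts.toByteArray();                // serialize
//   GPUOptions parsed = GPUOptions.parseFrom(wire);  // parse back
//   assert parsed.equals(opts);                      // field-wise equality (see equals above)
//
//   ConfigProto config = ConfigProto.newBuilder()
//       .setGpuOptions(opts)
//       .build();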



