// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: tensorflow/core/protobuf/config.proto
package org.tensorflow.framework;
public interface GPUOptionsOrBuilder extends
    // @@protoc_insertion_point(interface_extends:tensorflow.GPUOptions)
    com.google.protobuf.MessageOrBuilder {
  /**
   * <pre>
   * A value between 0 and 1 that indicates what fraction of the
   * available GPU memory to pre-allocate for each process. 1 means
   * to pre-allocate all of the GPU memory, 0.5 means the process
   * allocates ~50% of the available GPU memory.
   * </pre>
   *
   * <code>optional double per_process_gpu_memory_fraction = 1;</code>
   */
  double getPerProcessGpuMemoryFraction();
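  // Usage sketch (illustrative values; assumes the generated GPUOptions message
  // class that accompanies this interface): cap each process at ~40% of GPU memory.
  //
  //   GPUOptions opts = GPUOptions.newBuilder()
  //       .setPerProcessGpuMemoryFraction(0.4)
  //       .build();
  //   double fraction = opts.getPerProcessGpuMemoryFraction();  // 0.4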
  /**
   * <pre>
   * The type of GPU allocation strategy to use.
   * Allowed values:
   * "": The empty string (default) uses a system-chosen default
   *     which may change over time.
   * "BFC": A "Best-fit with coalescing" algorithm, simplified from a
   *     version of dlmalloc.
   * </pre>
   *
   * <code>optional string allocator_type = 2;</code>
   */
  java.lang.String getAllocatorType();
  /**
   * <pre>
   * The type of GPU allocation strategy to use.
   * Allowed values:
   * "": The empty string (default) uses a system-chosen default
   *     which may change over time.
   * "BFC": A "Best-fit with coalescing" algorithm, simplified from a
   *     version of dlmalloc.
   * </pre>
   *
   * <code>optional string allocator_type = 2;</code>
   */
  com.google.protobuf.ByteString
      getAllocatorTypeBytes();
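  // Note: protobuf string fields expose both accessors above. getAllocatorType()
  // returns a java.lang.String, while getAllocatorTypeBytes() returns the same
  // value as a com.google.protobuf.ByteString. A sketch ("BFC" is one of the
  // allowed values documented above):
  //
  //   GPUOptions opts = GPUOptions.newBuilder().setAllocatorType("BFC").build();
  //   String asString  = opts.getAllocatorType();                     // "BFC"
  //   String fromBytes = opts.getAllocatorTypeBytes().toStringUtf8(); // "BFC"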
  /**
   * <pre>
   * Delay deletion of up to this many bytes to reduce the number of
   * interactions with gpu driver code. If 0, the system chooses
   * a reasonable default (several MBs).
   * </pre>
   *
   * <code>optional int64 deferred_deletion_bytes = 3;</code>
   */
  long getDeferredDeletionBytes();
  /**
   * <pre>
   * If true, the allocator does not pre-allocate the entire specified
   * GPU memory region, instead starting small and growing as needed.
   * </pre>
   *
   * <code>optional bool allow_growth = 4;</code>
   */
  boolean getAllowGrowth();
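  // Usage sketch: let the allocator grow on demand instead of pre-allocating the
  // whole region (again assuming the generated GPUOptions builder).
  //
  //   GPUOptions opts = GPUOptions.newBuilder()
  //       .setAllowGrowth(true)
  //       .build();
  //   assert opts.getAllowGrowth();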
  /**
   * <pre>
   * A comma-separated list of GPU ids that determines the 'visible'
   * to 'virtual' mapping of GPU devices. For example, if TensorFlow
   * can see 8 GPU devices in the process, and one wanted to map
   * visible GPU devices 5 and 3 as "/device:GPU:0" and "/device:GPU:1",
   * then one would specify this field as "5,3". This field is similar in
   * spirit to the CUDA_VISIBLE_DEVICES environment variable, except
   * it applies to the visible GPU devices in the process.
   * NOTE: The GPU driver provides the process with the visible GPUs
   * in an order which is not guaranteed to have any correlation to
   * the *physical* GPU id in the machine. This field is used for
   * remapping "visible" to "virtual", which means this operates only
   * after the process starts. Users are required to use vendor
   * specific mechanisms (e.g., CUDA_VISIBLE_DEVICES) to control the
   * physical to visible device mapping prior to invoking TensorFlow.
   * </pre>
   *
   * <code>optional string visible_device_list = 5;</code>
   */
  java.lang.String getVisibleDeviceList();
  /**
   * <pre>
   * A comma-separated list of GPU ids that determines the 'visible'
   * to 'virtual' mapping of GPU devices. For example, if TensorFlow
   * can see 8 GPU devices in the process, and one wanted to map
   * visible GPU devices 5 and 3 as "/device:GPU:0" and "/device:GPU:1",
   * then one would specify this field as "5,3". This field is similar in
   * spirit to the CUDA_VISIBLE_DEVICES environment variable, except
   * it applies to the visible GPU devices in the process.
   * NOTE: The GPU driver provides the process with the visible GPUs
   * in an order which is not guaranteed to have any correlation to
   * the *physical* GPU id in the machine. This field is used for
   * remapping "visible" to "virtual", which means this operates only
   * after the process starts. Users are required to use vendor
   * specific mechanisms (e.g., CUDA_VISIBLE_DEVICES) to control the
   * physical to visible device mapping prior to invoking TensorFlow.
   * </pre>
   *
   * <code>optional string visible_device_list = 5;</code>
   */
  com.google.protobuf.ByteString
      getVisibleDeviceListBytes();
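  // Usage sketch, reusing the "5,3" example from the comment above: visible GPU 5
  // becomes "/device:GPU:0" and visible GPU 3 becomes "/device:GPU:1".
  //
  //   GPUOptions opts = GPUOptions.newBuilder()
  //       .setVisibleDeviceList("5,3")
  //       .build();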
  /**
   * <pre>
   * In the event polling loop, sleep this many microseconds between
   * PollEvents calls when the queue is not empty. If the value is not
   * set or is set to 0, it is set to a non-zero default.
   * </pre>
   *
   * <code>optional int32 polling_active_delay_usecs = 6;</code>
   */
  int getPollingActiveDelayUsecs();
  /**
   * <pre>
   * In the event polling loop, sleep this many milliseconds between
   * PollEvents calls when the queue is empty. If the value is not
   * set or is set to 0, it is set to a non-zero default.
   * </pre>
   *
   * <code>optional int32 polling_inactive_delay_msecs = 7;</code>
   */
  int getPollingInactiveDelayMsecs();
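  // Both polling delays share the "0 or unset means a system-chosen default"
  // convention. A sketch of reading them through this interface (hypothetical
  // helper, not part of the generated API):
  //
  //   void describePolling(GPUOptionsOrBuilder opts) {
  //     int active = opts.getPollingActiveDelayUsecs();     // usecs between polls, queue non-empty
  //     int inactive = opts.getPollingInactiveDelayMsecs(); // msecs between polls, queue empty
  //     System.out.println(active == 0 ? "active: system default" : "active: " + active + "us");
  //     System.out.println(inactive == 0 ? "inactive: system default" : "inactive: " + inactive + "ms");
  //   }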
  /**
   * <pre>
   * Force all tensors to be gpu_compatible. On a GPU-enabled TensorFlow,
   * enabling this option forces all CPU tensors to be allocated with Cuda
   * pinned memory. Normally, TensorFlow will infer which tensors should be
   * allocated as pinned memory. But in cases where the inference is
   * incomplete, this option can significantly speed up cross-device memory
   * copy performance, as long as it fits in memory.
   * Note that this option is not something that should be
   * enabled by default for unknown or very large models, since all Cuda pinned
   * memory is unpageable; having too much pinned memory might negatively impact
   * the overall host system performance.
   * </pre>
   *
   * <code>optional bool force_gpu_compatible = 8;</code>
   */
  boolean getForceGpuCompatible();
}
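
// A minimal end-to-end sketch, not part of the generated file: builds a
// GPUOptions message touching the fields above and embeds it in a session
// ConfigProto. Assumes the generated GPUOptions and ConfigProto classes from
// this same tensorflow/core/protobuf/config.proto package; all values are
// illustrative only.
class GPUOptionsUsageExample {
  public static void main(String[] args) {
    GPUOptions gpuOptions =
        GPUOptions.newBuilder()
            .setPerProcessGpuMemoryFraction(0.5) // pre-allocate ~50% per process
            .setAllocatorType("BFC")             // best-fit with coalescing
            .setDeferredDeletionBytes(0)         // 0 => system-chosen default
            .setAllowGrowth(true)                // grow instead of pre-allocating
            .setVisibleDeviceList("5,3")         // visible 5 -> /device:GPU:0, 3 -> /device:GPU:1
            .setForceGpuCompatible(false)        // leave pinned-memory inference to TF
            .build();

    // The built message implements GPUOptionsOrBuilder, so every getter in the
    // interface above can be used to read the configuration back.
    GPUOptionsOrBuilder view = gpuOptions;
    System.out.println("fraction=" + view.getPerProcessGpuMemoryFraction());
    System.out.println("allocator=" + view.getAllocatorType());
    System.out.println("growth=" + view.getAllowGrowth());

    // Typical use: attach the options to a session ConfigProto.
    ConfigProto config =
        ConfigProto.newBuilder()
            .setGpuOptions(gpuOptions)
            .build();
    System.out.println("config has GPU options: " + config.hasGpuOptions());
  }
}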