getSessionInterOpThreadPoolList();
/**
 * <pre>
 * This option is experimental - it may be replaced with a different mechanism
 * in the future.
 * Configures session thread pools. If this is configured, then RunOptions for
 * a Run call can select the thread pool to use.
 * The intended use is for when some session invocations need to run in a
 * background pool limited to a small number of threads:
 * - For example, a session may be configured to have one large pool (for
 * regular compute) and one small pool (for periodic, low priority work);
 * using the small pool is currently the mechanism for limiting the inter-op
 * parallelism of the low priority work. Note that it does not limit the
 * parallelism of work spawned by a single op kernel implementation.
 * - Using this setting is normally not needed in training, but may help some
 * serving use cases.
 * - It is also generally recommended to set the global_name field of this
 * proto, to avoid creating multiple large pools. It is typically better to
 * run the non-low-priority work, even across sessions, in a single large
 * pool.
 * </pre>
 *
 * <code>repeated .tensorflow.ThreadPoolOptionProto session_inter_op_thread_pool = 12;</code>
 */
org.tensorflow.framework.ThreadPoolOptionProto getSessionInterOpThreadPool(int index);
/**
 * <pre>
 * This option is experimental - it may be replaced with a different mechanism
 * in the future.
 * Configures session thread pools. If this is configured, then RunOptions for
 * a Run call can select the thread pool to use.
 * The intended use is for when some session invocations need to run in a
 * background pool limited to a small number of threads:
 * - For example, a session may be configured to have one large pool (for
 * regular compute) and one small pool (for periodic, low priority work);
 * using the small pool is currently the mechanism for limiting the inter-op
 * parallelism of the low priority work. Note that it does not limit the
 * parallelism of work spawned by a single op kernel implementation.
 * - Using this setting is normally not needed in training, but may help some
 * serving use cases.
 * - It is also generally recommended to set the global_name field of this
 * proto, to avoid creating multiple large pools. It is typically better to
 * run the non-low-priority work, even across sessions, in a single large
 * pool.
 * </pre>
 *
 * <code>repeated .tensorflow.ThreadPoolOptionProto session_inter_op_thread_pool = 12;</code>
 */
int getSessionInterOpThreadPoolCount();
/**
 * <pre>
 * This option is experimental - it may be replaced with a different mechanism
 * in the future.
 * Configures session thread pools. If this is configured, then RunOptions for
 * a Run call can select the thread pool to use.
 * The intended use is for when some session invocations need to run in a
 * background pool limited to a small number of threads:
 * - For example, a session may be configured to have one large pool (for
 * regular compute) and one small pool (for periodic, low priority work);
 * using the small pool is currently the mechanism for limiting the inter-op
 * parallelism of the low priority work. Note that it does not limit the
 * parallelism of work spawned by a single op kernel implementation.
 * - Using this setting is normally not needed in training, but may help some
 * serving use cases.
 * - It is also generally recommended to set the global_name field of this
 * proto, to avoid creating multiple large pools. It is typically better to
 * run the non-low-priority work, even across sessions, in a single large
 * pool.
 * </pre>
 *
 * <code>repeated .tensorflow.ThreadPoolOptionProto session_inter_op_thread_pool = 12;</code>
 */
java.util.List<? extends org.tensorflow.framework.ThreadPoolOptionProtoOrBuilder>
    getSessionInterOpThreadPoolOrBuilderList();
/**
 * <pre>
 * This option is experimental - it may be replaced with a different mechanism
 * in the future.
 * Configures session thread pools. If this is configured, then RunOptions for
 * a Run call can select the thread pool to use.
 * The intended use is for when some session invocations need to run in a
 * background pool limited to a small number of threads:
 * - For example, a session may be configured to have one large pool (for
 * regular compute) and one small pool (for periodic, low priority work);
 * using the small pool is currently the mechanism for limiting the inter-op
 * parallelism of the low priority work. Note that it does not limit the
 * parallelism of work spawned by a single op kernel implementation.
 * - Using this setting is normally not needed in training, but may help some
 * serving use cases.
 * - It is also generally recommended to set the global_name field of this
 * proto, to avoid creating multiple large pools. It is typically better to
 * run the non-low-priority work, even across sessions, in a single large
 * pool.
 * </pre>
 *
 * <code>repeated .tensorflow.ThreadPoolOptionProto session_inter_op_thread_pool = 12;</code>
 */
org.tensorflow.framework.ThreadPoolOptionProtoOrBuilder getSessionInterOpThreadPoolOrBuilder(
    int index);
/**
 * <pre>
 * Assignment of Nodes to Devices is recomputed every placement_period
 * steps until the system warms up (at which point the recomputation
 * typically slows down automatically).
 * </pre>
 *
 * <code>int32 placement_period = 3;</code>
 */
int getPlacementPeriod();
/**
 * <pre>
 * When any filters are present sessions will ignore all devices which do not
 * match the filters. Each filter can be partially specified, e.g. "/job:ps"
 * "/job:worker/replica:3", etc.
 * </pre>
 *
 * <code>repeated string device_filters = 4;</code>
 */
java.util.List<java.lang.String>
    getDeviceFiltersList();
/**
 * <pre>
 * When any filters are present sessions will ignore all devices which do not
 * match the filters. Each filter can be partially specified, e.g. "/job:ps"
 * "/job:worker/replica:3", etc.
 * </pre>
 *
 * <code>repeated string device_filters = 4;</code>
 */
int getDeviceFiltersCount();
/**
 * <pre>
 * When any filters are present sessions will ignore all devices which do not
 * match the filters. Each filter can be partially specified, e.g. "/job:ps"
 * "/job:worker/replica:3", etc.
 * </pre>
 *
 * <code>repeated string device_filters = 4;</code>
 */
java.lang.String getDeviceFilters(int index);
/**
 * <pre>
 * When any filters are present sessions will ignore all devices which do not
 * match the filters. Each filter can be partially specified, e.g. "/job:ps"
 * "/job:worker/replica:3", etc.
 * </pre>
 *
 * <code>repeated string device_filters = 4;</code>
 */
com.github.os72.protobuf351.ByteString
    getDeviceFiltersBytes(int index);
/**
 * <pre>
 * Options that apply to all GPUs.
 * </pre>
 *
 * <code>.tensorflow.GPUOptions gpu_options = 6;</code>
 */
boolean hasGpuOptions();
/**
 * <pre>
 * Options that apply to all GPUs.
 * </pre>
 *
 * <code>.tensorflow.GPUOptions gpu_options = 6;</code>
 */
org.tensorflow.framework.GPUOptions getGpuOptions();
/**
 * <pre>
 * Options that apply to all GPUs.
 * </pre>
 *
 * <code>.tensorflow.GPUOptions gpu_options = 6;</code>
 */
org.tensorflow.framework.GPUOptionsOrBuilder getGpuOptionsOrBuilder();
/**
 * <pre>
 * Whether soft placement is allowed. If allow_soft_placement is true,
 * an op will be placed on CPU if
 * 1. there's no GPU implementation for the OP
 * or
 * 2. no GPU devices are known or registered
 * or
 * 3. need to co-locate with reftype input(s) which are from CPU.
 * </pre>
 *
 * <code>bool allow_soft_placement = 7;</code>
 */
boolean getAllowSoftPlacement();
/**
 * <pre>
 * Whether device placements should be logged.
 * </pre>
 *
 * <code>bool log_device_placement = 8;</code>
 */
boolean getLogDevicePlacement();
/**
 * <pre>
 * Options that apply to all graphs.
 * </pre>
 *
 * <code>.tensorflow.GraphOptions graph_options = 10;</code>
 */
boolean hasGraphOptions();
/**
 * <pre>
 * Options that apply to all graphs.
 * </pre>
 *
 * <code>.tensorflow.GraphOptions graph_options = 10;</code>
 */
org.tensorflow.framework.GraphOptions getGraphOptions();
/**
 * <pre>
 * Options that apply to all graphs.
 * </pre>
 *
 * <code>.tensorflow.GraphOptions graph_options = 10;</code>
 */
org.tensorflow.framework.GraphOptionsOrBuilder getGraphOptionsOrBuilder();
/**
 * <pre>
 * Global timeout for all blocking operations in this session. If non-zero,
 * and not overridden on a per-operation basis, this value will be used as the
 * deadline for all blocking operations.
 * </pre>
 *
 * <code>int64 operation_timeout_in_ms = 11;</code>
 */
long getOperationTimeoutInMs();
/**
 * <pre>
 * Options that apply when this session uses the distributed runtime.
 * </pre>
 *
 * <code>.tensorflow.RPCOptions rpc_options = 13;</code>
 */
boolean hasRpcOptions();
/**
 * <pre>
 * Options that apply when this session uses the distributed runtime.
 * </pre>
 *
 * <code>.tensorflow.RPCOptions rpc_options = 13;</code>
 */
org.tensorflow.framework.RPCOptions getRpcOptions();
/**
 * <pre>
 * Options that apply when this session uses the distributed runtime.
 * </pre>
 *
 * <code>.tensorflow.RPCOptions rpc_options = 13;</code>
 */
org.tensorflow.framework.RPCOptionsOrBuilder getRpcOptionsOrBuilder();
/**
 * <pre>
 * Optional list of all workers to use in this session.
 * </pre>
 *
 * <code>.tensorflow.ClusterDef cluster_def = 14;</code>
 */
boolean hasClusterDef();
/**
 * <pre>
 * Optional list of all workers to use in this session.
 * </pre>
 *
 * <code>.tensorflow.ClusterDef cluster_def = 14;</code>
 */
org.tensorflow.distruntime.ClusterDef getClusterDef();
/**
 * <pre>
 * Optional list of all workers to use in this session.
 * </pre>
 *
 * <code>.tensorflow.ClusterDef cluster_def = 14;</code>
 */
org.tensorflow.distruntime.ClusterDefOrBuilder getClusterDefOrBuilder();
/**
 * <pre>
 * If true, any resources such as Variables used in the session will not be
 * shared with other sessions.
 * </pre>
 *
 * <code>bool isolate_session_state = 15;</code>
 */
boolean getIsolateSessionState();
/**
 * <code>.tensorflow.ConfigProto.Experimental experimental = 16;</code>
 */
boolean hasExperimental();
/**
 * <code>.tensorflow.ConfigProto.Experimental experimental = 16;</code>
 */
org.tensorflow.framework.ConfigProto.Experimental getExperimental();
/**
 * <code>.tensorflow.ConfigProto.Experimental experimental = 16;</code>
 */
org.tensorflow.framework.ConfigProto.ExperimentalOrBuilder getExperimentalOrBuilder();
}