
org.tensorflow.framework.ConfigProtoOrBuilder

// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: tensorflow/core/protobuf/config.proto

package org.tensorflow.framework;

public interface ConfigProtoOrBuilder extends
    // @@protoc_insertion_point(interface_extends:tensorflow.ConfigProto)
    com.google.protobuf.MessageOrBuilder {

  /**
   * 
   * Map from device type name (e.g., "CPU" or "GPU" ) to maximum
   * number of devices of that type to use.  If a particular device
   * type is not found in the map, the system picks an appropriate
   * number.
   * 
   * map<string, int32> device_count = 1;
   */
  int getDeviceCountCount();

  /**
   *
   * Map from device type name (e.g., "CPU" or "GPU" ) to maximum
   * number of devices of that type to use.  If a particular device
   * type is not found in the map, the system picks an appropriate
   * number.
   * 
   * map<string, int32> device_count = 1;
   */
  boolean containsDeviceCount(
      java.lang.String key);

  /**
   * Use {@link #getDeviceCountMap()} instead.
   */
  @java.lang.Deprecated
  java.util.Map<java.lang.String, java.lang.Integer> getDeviceCount();

  /**
   *
   * Map from device type name (e.g., "CPU" or "GPU" ) to maximum
   * number of devices of that type to use.  If a particular device
   * type is not found in the map, the system picks an appropriate
   * number.
   * 
   * map<string, int32> device_count = 1;
   */
  java.util.Map<java.lang.String, java.lang.Integer> getDeviceCountMap();

  /**
   *
   * Map from device type name (e.g., "CPU" or "GPU" ) to maximum
   * number of devices of that type to use.  If a particular device
   * type is not found in the map, the system picks an appropriate
   * number.
   * 
   * map<string, int32> device_count = 1;
   */
  int getDeviceCountOrDefault(
      java.lang.String key,
      int defaultValue);

  /**
   *
   * Map from device type name (e.g., "CPU" or "GPU" ) to maximum
   * number of devices of that type to use.  If a particular device
   * type is not found in the map, the system picks an appropriate
   * number.
   * 
   * map<string, int32> device_count = 1;
   */
  int getDeviceCountOrThrow(
      java.lang.String key);

  /**
   *
   * The execution of an individual op (for some op types) can be
   * parallelized on a pool of intra_op_parallelism_threads.
   * 0 means the system picks an appropriate number.
   * 
   * int32 intra_op_parallelism_threads = 2;
   */
  int getIntraOpParallelismThreads();

  /**
   *
   * Nodes that perform blocking operations are enqueued on a pool of
   * inter_op_parallelism_threads available in each process.
   * 0 means the system picks an appropriate number.
   * Negative means all operations are performed in caller's thread.
   * Note that the first Session created in the process sets the
   * number of threads for all future sessions unless use_per_session_threads is
   * true or session_inter_op_thread_pool is configured.
   * 
   * int32 inter_op_parallelism_threads = 5;
   */
  int getInterOpParallelismThreads();

  /**
   *
   * If true, use a new set of threads for this session rather than the global
   * pool of threads. Only supported by direct sessions.
   * If false, use the global threads created by the first session, or the
   * per-session thread pools configured by session_inter_op_thread_pool.
   * This option is deprecated. The same effect can be achieved by setting
   * session_inter_op_thread_pool to have one element, whose num_threads equals
   * inter_op_parallelism_threads.
   * 
   * bool use_per_session_threads = 9;
   */
  boolean getUsePerSessionThreads();

  /**
   *
   * This option is experimental - it may be replaced with a different mechanism
   * in the future.
   * Configures session thread pools. If this is configured, then RunOptions for
   * a Run call can select the thread pool to use.
   * The intended use is for when some session invocations need to run in a
   * background pool limited to a small number of threads:
   * - For example, a session may be configured to have one large pool (for
   * regular compute) and one small pool (for periodic, low priority work);
   * using the small pool is currently the mechanism for limiting the inter-op
   * parallelism of the low priority work.  Note that it does not limit the
   * parallelism of work spawned by a single op kernel implementation.
   * - Using this setting is normally not needed in training, but may help some
   * serving use cases.
   * - It is also generally recommended to set the global_name field of this
   * proto, to avoid creating multiple large pools. It is typically better to
   * run the non-low-priority work, even across sessions, in a single large
   * pool.
   * 
   * repeated .tensorflow.ThreadPoolOptionProto session_inter_op_thread_pool = 12;
   */
  java.util.List<org.tensorflow.framework.ThreadPoolOptionProto> getSessionInterOpThreadPoolList();

  /**
   *
   * This option is experimental - it may be replaced with a different mechanism
   * in the future.
   * Configures session thread pools. If this is configured, then RunOptions for
   * a Run call can select the thread pool to use.
   * The intended use is for when some session invocations need to run in a
   * background pool limited to a small number of threads:
   * - For example, a session may be configured to have one large pool (for
   * regular compute) and one small pool (for periodic, low priority work);
   * using the small pool is currently the mechanism for limiting the inter-op
   * parallelism of the low priority work.  Note that it does not limit the
   * parallelism of work spawned by a single op kernel implementation.
   * - Using this setting is normally not needed in training, but may help some
   * serving use cases.
   * - It is also generally recommended to set the global_name field of this
   * proto, to avoid creating multiple large pools. It is typically better to
   * run the non-low-priority work, even across sessions, in a single large
   * pool.
   * 
   * repeated .tensorflow.ThreadPoolOptionProto session_inter_op_thread_pool = 12;
   */
  org.tensorflow.framework.ThreadPoolOptionProto getSessionInterOpThreadPool(int index);

  /**
   *
   * This option is experimental - it may be replaced with a different mechanism
   * in the future.
   * Configures session thread pools. If this is configured, then RunOptions for
   * a Run call can select the thread pool to use.
   * The intended use is for when some session invocations need to run in a
   * background pool limited to a small number of threads:
   * - For example, a session may be configured to have one large pool (for
   * regular compute) and one small pool (for periodic, low priority work);
   * using the small pool is currently the mechanism for limiting the inter-op
   * parallelism of the low priority work.  Note that it does not limit the
   * parallelism of work spawned by a single op kernel implementation.
   * - Using this setting is normally not needed in training, but may help some
   * serving use cases.
   * - It is also generally recommended to set the global_name field of this
   * proto, to avoid creating multiple large pools. It is typically better to
   * run the non-low-priority work, even across sessions, in a single large
   * pool.
   * 
   * repeated .tensorflow.ThreadPoolOptionProto session_inter_op_thread_pool = 12;
   */
  int getSessionInterOpThreadPoolCount();

  /**
   *
   * This option is experimental - it may be replaced with a different mechanism
   * in the future.
   * Configures session thread pools. If this is configured, then RunOptions for
   * a Run call can select the thread pool to use.
   * The intended use is for when some session invocations need to run in a
   * background pool limited to a small number of threads:
   * - For example, a session may be configured to have one large pool (for
   * regular compute) and one small pool (for periodic, low priority work);
   * using the small pool is currently the mechanism for limiting the inter-op
   * parallelism of the low priority work.  Note that it does not limit the
   * parallelism of work spawned by a single op kernel implementation.
   * - Using this setting is normally not needed in training, but may help some
   * serving use cases.
   * - It is also generally recommended to set the global_name field of this
   * proto, to avoid creating multiple large pools. It is typically better to
   * run the non-low-priority work, even across sessions, in a single large
   * pool.
   * 
   * repeated .tensorflow.ThreadPoolOptionProto session_inter_op_thread_pool = 12;
   */
  java.util.List<? extends org.tensorflow.framework.ThreadPoolOptionProtoOrBuilder>
      getSessionInterOpThreadPoolOrBuilderList();

  /**
   *
   * This option is experimental - it may be replaced with a different mechanism
   * in the future.
   * Configures session thread pools. If this is configured, then RunOptions for
   * a Run call can select the thread pool to use.
   * The intended use is for when some session invocations need to run in a
   * background pool limited to a small number of threads:
   * - For example, a session may be configured to have one large pool (for
   * regular compute) and one small pool (for periodic, low priority work);
   * using the small pool is currently the mechanism for limiting the inter-op
   * parallelism of the low priority work.  Note that it does not limit the
   * parallelism of work spawned by a single op kernel implementation.
   * - Using this setting is normally not needed in training, but may help some
   * serving use cases.
   * - It is also generally recommended to set the global_name field of this
   * proto, to avoid creating multiple large pools. It is typically better to
   * run the non-low-priority work, even across sessions, in a single large
   * pool.
   * 
   * repeated .tensorflow.ThreadPoolOptionProto session_inter_op_thread_pool = 12;
   */
  org.tensorflow.framework.ThreadPoolOptionProtoOrBuilder getSessionInterOpThreadPoolOrBuilder(
      int index);

  /**
   *
   * Assignment of Nodes to Devices is recomputed every placement_period
   * steps until the system warms up (at which point the recomputation
   * typically slows down automatically).
   * 
   * int32 placement_period = 3;
   */
  int getPlacementPeriod();

  /**
   *
   * When any filters are present sessions will ignore all devices which do not
   * match the filters. Each filter can be partially specified, e.g. "/job:ps"
   * "/job:worker/replica:3", etc.
   * 
   * repeated string device_filters = 4;
   */
  java.util.List<java.lang.String> getDeviceFiltersList();

  /**
   *
   * When any filters are present sessions will ignore all devices which do not
   * match the filters. Each filter can be partially specified, e.g. "/job:ps"
   * "/job:worker/replica:3", etc.
   * 
   * repeated string device_filters = 4;
   */
  int getDeviceFiltersCount();

  /**
   *
   * When any filters are present sessions will ignore all devices which do not
   * match the filters. Each filter can be partially specified, e.g. "/job:ps"
   * "/job:worker/replica:3", etc.
   * 
   * repeated string device_filters = 4;
   */
  java.lang.String getDeviceFilters(int index);

  /**
   *
   * When any filters are present sessions will ignore all devices which do not
   * match the filters. Each filter can be partially specified, e.g. "/job:ps"
   * "/job:worker/replica:3", etc.
   * 
   * repeated string device_filters = 4;
   */
  com.google.protobuf.ByteString getDeviceFiltersBytes(int index);

  /**
   *
   * Options that apply to all GPUs.
   * 
   * .tensorflow.GPUOptions gpu_options = 6;
   */
  boolean hasGpuOptions();

  /**
   *
   * Options that apply to all GPUs.
   * 
   * .tensorflow.GPUOptions gpu_options = 6;
   */
  org.tensorflow.framework.GPUOptions getGpuOptions();

  /**
   *
   * Options that apply to all GPUs.
   * 
   * .tensorflow.GPUOptions gpu_options = 6;
   */
  org.tensorflow.framework.GPUOptionsOrBuilder getGpuOptionsOrBuilder();

  /**
   *
   * Whether soft placement is allowed. If allow_soft_placement is true,
   * an op will be placed on CPU if
   *   1. there's no GPU implementation for the OP
   * or
   *   2. no GPU devices are known or registered
   * or
   *   3. need to co-locate with reftype input(s) which are from CPU.
   * 
   * bool allow_soft_placement = 7;
   */
  boolean getAllowSoftPlacement();

  /**
   *
   * Whether device placements should be logged.
   * 
   * bool log_device_placement = 8;
   */
  boolean getLogDevicePlacement();

  /**
   *
   * Options that apply to all graphs.
   * 
   * .tensorflow.GraphOptions graph_options = 10;
   */
  boolean hasGraphOptions();

  /**
   *
   * Options that apply to all graphs.
   * 
   * .tensorflow.GraphOptions graph_options = 10;
   */
  org.tensorflow.framework.GraphOptions getGraphOptions();

  /**
   *
   * Options that apply to all graphs.
   * 
   * .tensorflow.GraphOptions graph_options = 10;
   */
  org.tensorflow.framework.GraphOptionsOrBuilder getGraphOptionsOrBuilder();

  /**
   *
   * Global timeout for all blocking operations in this session.  If non-zero,
   * and not overridden on a per-operation basis, this value will be used as the
   * deadline for all blocking operations.
   * 
   * int64 operation_timeout_in_ms = 11;
   */
  long getOperationTimeoutInMs();

  /**
   *
   * Options that apply when this session uses the distributed runtime.
   * 
   * .tensorflow.RPCOptions rpc_options = 13;
   */
  boolean hasRpcOptions();

  /**
   *
   * Options that apply when this session uses the distributed runtime.
   * 
   * .tensorflow.RPCOptions rpc_options = 13;
   */
  org.tensorflow.framework.RPCOptions getRpcOptions();

  /**
   *
   * Options that apply when this session uses the distributed runtime.
   * 
   * .tensorflow.RPCOptions rpc_options = 13;
   */
  org.tensorflow.framework.RPCOptionsOrBuilder getRpcOptionsOrBuilder();

  /**
   *
   * Optional list of all workers to use in this session.
   * 
   * .tensorflow.ClusterDef cluster_def = 14;
   */
  boolean hasClusterDef();

  /**
   *
   * Optional list of all workers to use in this session.
   * 
   * .tensorflow.ClusterDef cluster_def = 14;
   */
  org.tensorflow.distruntime.ClusterDef getClusterDef();

  /**
   *
   * Optional list of all workers to use in this session.
   * 
   * .tensorflow.ClusterDef cluster_def = 14;
   */
  org.tensorflow.distruntime.ClusterDefOrBuilder getClusterDefOrBuilder();

  /**
   *
   * If true, any resources such as Variables used in the session will not be
   * shared with other sessions. However, when clusterspec propagation is
   * enabled, this field is ignored and sessions are always isolated.
   * 
   * bool isolate_session_state = 15;
   */
  boolean getIsolateSessionState();

  /**
   * .tensorflow.ConfigProto.Experimental experimental = 16;
   */
  boolean hasExperimental();

  /**
   * .tensorflow.ConfigProto.Experimental experimental = 16;
   */
  org.tensorflow.framework.ConfigProto.Experimental getExperimental();

  /**
   * .tensorflow.ConfigProto.Experimental experimental = 16;
   */
  org.tensorflow.framework.ConfigProto.ExperimentalOrBuilder getExperimentalOrBuilder();
}
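The interface above is read-only; in practice a ConfigProto is assembled with the generated builder and handed to a session. The sketch below is illustrative only: it assumes the TensorFlow 1.x Java API (org.tensorflow.Graph, org.tensorflow.Session) and this artifact's proto classes are on the classpath, and the field values shown (device caps, thread counts, timeout) are arbitrary examples rather than recommendations.

import org.tensorflow.Graph;
import org.tensorflow.Session;
import org.tensorflow.framework.ConfigProto;
import org.tensorflow.framework.GPUOptions;

public class ConfigProtoExample {
  public static void main(String[] args) {
    // Build a ConfigProto via the generated protobuf builder.
    ConfigProto config = ConfigProto.newBuilder()
        // device_count: at most one CPU device, hide GPUs entirely.
        .putDeviceCount("CPU", 1)
        .putDeviceCount("GPU", 0)
        // 0 would let the system choose; here both pools are pinned to 2 threads.
        .setIntraOpParallelismThreads(2)
        .setInterOpParallelismThreads(2)
        // Fall back to CPU when an op has no GPU kernel, and log placements.
        .setAllowSoftPlacement(true)
        .setLogDevicePlacement(true)
        // gpu_options: grow the GPU allocator instead of grabbing all memory.
        .setGpuOptions(GPUOptions.newBuilder().setAllowGrowth(true))
        // Deadline for blocking operations: 60 s (illustrative value).
        .setOperationTimeoutInMs(60_000L)
        .build();

    try (Graph graph = new Graph();
         // Session(Graph, byte[]) accepts the serialized ConfigProto.
         Session session = new Session(graph, config.toByteArray())) {
      // ... build the graph and call session.runner() as usual ...
    }
  }
}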
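The session_inter_op_thread_pool comment describes one large pool for regular compute plus a small pool for periodic, low-priority work, selected per Run call through RunOptions. A minimal sketch of that arrangement follows; it assumes the RunOptions.inter_op_thread_pool field from the same config.proto, and the pool sizes and global_name value are made up for illustration.

import org.tensorflow.framework.ConfigProto;
import org.tensorflow.framework.RunOptions;
import org.tensorflow.framework.ThreadPoolOptionProto;

public class SessionThreadPools {
  // ConfigProto with two inter-op pools: index 0 for regular compute,
  // index 1 as a small pool for periodic, low-priority work.
  static ConfigProto twoPoolConfig() {
    return ConfigProto.newBuilder()
        .addSessionInterOpThreadPool(ThreadPoolOptionProto.newBuilder()
            .setNumThreads(8)
            // global_name lets several sessions share one large pool.
            .setGlobalName("shared_compute_pool"))
        .addSessionInterOpThreadPool(ThreadPoolOptionProto.newBuilder()
            .setNumThreads(1))
        .build();
  }

  // RunOptions that routes a particular Run call onto pool index 1.
  static RunOptions lowPriorityRun() {
    return RunOptions.newBuilder()
        .setInterOpThreadPool(1)
        .build();
  }
}

In the 1.x Java API the serialized forms would then be passed along: twoPoolConfig().toByteArray() to the Session constructor and lowPriorityRun().toByteArray() to Session.Runner.setOptions(byte[]).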
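cluster_def and device_filters matter when the session uses the distributed runtime. A hedged sketch using org.tensorflow.distruntime.ClusterDef and JobDef, as referenced by getClusterDef() above; the job names, task addresses, and filter string are placeholders.

import org.tensorflow.distruntime.ClusterDef;
import org.tensorflow.distruntime.JobDef;
import org.tensorflow.framework.ConfigProto;

public class DistributedSessionConfig {
  // ConfigProto for a session attached to a two-job cluster; the device
  // filter restricts this session to devices under /job:ps.
  static ConfigProto psOnlyConfig() {
    ClusterDef cluster = ClusterDef.newBuilder()
        .addJob(JobDef.newBuilder()
            .setName("ps")
            .putTasks(0, "ps0.example.com:2222"))      // placeholder address
        .addJob(JobDef.newBuilder()
            .setName("worker")
            .putTasks(0, "worker0.example.com:2222"))  // placeholder address
        .build();

    return ConfigProto.newBuilder()
        .setClusterDef(cluster)
        .addDeviceFilters("/job:ps")
        .build();
  }
}

Because a filter is present, the session ignores every device that does not match /job:ps, which is the behavior described in the device_filters comment above.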



