org.tensorflow.framework.CallableOptionsOrBuilder

// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: tensorflow/core/protobuf/config.proto

package org.tensorflow.framework;

public interface CallableOptionsOrBuilder extends
    // @@protoc_insertion_point(interface_extends:tensorflow.CallableOptions)
    com.google.protobuf.MessageOrBuilder {

  /**
   * <pre>
   * Tensors to be fed in the callable. Each feed is the name of a tensor.
   * </pre>
   *
   * <code>repeated string feed = 1;</code>
   */
  java.util.List<java.lang.String>
      getFeedList();
  /**
   * <pre>
   * Tensors to be fed in the callable. Each feed is the name of a tensor.
   * </pre>
   *
   * <code>repeated string feed = 1;</code>
   */
  int getFeedCount();
  /**
   * <pre>
   * Tensors to be fed in the callable. Each feed is the name of a tensor.
   * </pre>
   *
   * <code>repeated string feed = 1;</code>
   */
  java.lang.String getFeed(int index);
  /**
   * <pre>
   * Tensors to be fed in the callable. Each feed is the name of a tensor.
   * </pre>
   *
   * <code>repeated string feed = 1;</code>
   */
  com.google.protobuf.ByteString
      getFeedBytes(int index);
  /**
   * <pre>
   * Fetches. A list of tensor names. The caller of the callable expects a
   * tensor to be returned for each fetch[i] (see RunStepResponse.tensor). The
   * order of specified fetches does not change the execution order.
   * </pre>
   *
   * <code>repeated string fetch = 2;</code>
   */
  java.util.List<java.lang.String>
      getFetchList();
  /**
   * <pre>
   * Fetches. A list of tensor names. The caller of the callable expects a
   * tensor to be returned for each fetch[i] (see RunStepResponse.tensor). The
   * order of specified fetches does not change the execution order.
   * </pre>
   *
   * <code>repeated string fetch = 2;</code>
   */
  int getFetchCount();
  /**
   * <pre>
   * Fetches. A list of tensor names. The caller of the callable expects a
   * tensor to be returned for each fetch[i] (see RunStepResponse.tensor). The
   * order of specified fetches does not change the execution order.
   * </pre>
   *
   * <code>repeated string fetch = 2;</code>
   */
  java.lang.String getFetch(int index);
  /**
   * <pre>
   * Fetches. A list of tensor names. The caller of the callable expects a
   * tensor to be returned for each fetch[i] (see RunStepResponse.tensor). The
   * order of specified fetches does not change the execution order.
   * </pre>
   *
   * <code>repeated string fetch = 2;</code>
   */
  com.google.protobuf.ByteString
      getFetchBytes(int index);
  /**
   * <pre>
   * Target Nodes. A list of node names. The named nodes will be run by the
   * callable but their outputs will not be returned.
   * </pre>
   *
   * <code>repeated string target = 3;</code>
   */
  java.util.List<java.lang.String>
      getTargetList();
  /**
   * <pre>
   * Target Nodes. A list of node names. The named nodes will be run by the
   * callable but their outputs will not be returned.
   * </pre>
   *
   * <code>repeated string target = 3;</code>
   */
  int getTargetCount();
  /**
   * <pre>
   * Target Nodes. A list of node names. The named nodes will be run by the
   * callable but their outputs will not be returned.
   * </pre>
   *
   * <code>repeated string target = 3;</code>
   */
  java.lang.String getTarget(int index);
  /**
   * <pre>
   * Target Nodes. A list of node names. The named nodes will be run by the
   * callable but their outputs will not be returned.
   * </pre>
   *
   * <code>repeated string target = 3;</code>
   */
  com.google.protobuf.ByteString
      getTargetBytes(int index);
  /**
   * <pre>
   * Options that will be applied to each run.
   * </pre>
   *
   * <code>.tensorflow.RunOptions run_options = 4;</code>
   */
  boolean hasRunOptions();
  /**
   * <pre>
   * Options that will be applied to each run.
   * </pre>
   *
   * <code>.tensorflow.RunOptions run_options = 4;</code>
   */
  org.tensorflow.framework.RunOptions getRunOptions();
  /**
   * <pre>
   * Options that will be applied to each run.
   * </pre>
   *
   * <code>.tensorflow.RunOptions run_options = 4;</code>
   */
  org.tensorflow.framework.RunOptionsOrBuilder getRunOptionsOrBuilder();
  /**
   * <pre>
   * Tensors to be connected in the callable. Each TensorConnection denotes
   * a pair of tensors in the graph, between which an edge will be created
   * in the callable.
   * </pre>
   *
   * <code>repeated .tensorflow.TensorConnection tensor_connection = 5;</code>
   */
  java.util.List<org.tensorflow.framework.TensorConnection>
      getTensorConnectionList();
  /**
   * <pre>
   * Tensors to be connected in the callable. Each TensorConnection denotes
   * a pair of tensors in the graph, between which an edge will be created
   * in the callable.
   * </pre>
   *
   * <code>repeated .tensorflow.TensorConnection tensor_connection = 5;</code>
   */
  org.tensorflow.framework.TensorConnection getTensorConnection(int index);
  /**
   * <pre>
   * Tensors to be connected in the callable. Each TensorConnection denotes
   * a pair of tensors in the graph, between which an edge will be created
   * in the callable.
   * </pre>
   *
   * <code>repeated .tensorflow.TensorConnection tensor_connection = 5;</code>
   */
  int getTensorConnectionCount();
  /**
   * <pre>
   * Tensors to be connected in the callable. Each TensorConnection denotes
   * a pair of tensors in the graph, between which an edge will be created
   * in the callable.
   * </pre>
   *
   * <code>repeated .tensorflow.TensorConnection tensor_connection = 5;</code>
   */
  java.util.List<? extends org.tensorflow.framework.TensorConnectionOrBuilder>
      getTensorConnectionOrBuilderList();
  /**
   * <pre>
   * Tensors to be connected in the callable. Each TensorConnection denotes
   * a pair of tensors in the graph, between which an edge will be created
   * in the callable.
   * </pre>
   *
   * <code>repeated .tensorflow.TensorConnection tensor_connection = 5;</code>
   */
  org.tensorflow.framework.TensorConnectionOrBuilder getTensorConnectionOrBuilder(
      int index);
  /**
   * <pre>
   * The Tensor objects fed in the callable and fetched from the callable
   * are expected to be backed by host (CPU) memory by default.
   * The options below allow changing that - feeding tensors backed by
   * device memory, or returning tensors that are backed by device memory.
   * The maps below map the name of a feed/fetch tensor (which appears in
   * 'feed' or 'fetch' fields above), to the fully qualified name of the device
   * owning the memory backing the contents of the tensor.
   * For example, creating a callable with the following options:
   * CallableOptions {
   *   feed: "a:0"
   *   feed: "b:0"
   *   fetch: "x:0"
   *   fetch: "y:0"
   *   feed_devices: {
   *     "a:0": "/job:localhost/replica:0/task:0/device:GPU:0"
   *   }
   *   fetch_devices: {
   *     "y:0": "/job:localhost/replica:0/task:0/device:GPU:0"
   *   }
   * }
   * means that the Callable expects:
   * - The first argument ("a:0") is a Tensor backed by GPU memory.
   * - The second argument ("b:0") is a Tensor backed by host memory.
   * and of its return values:
   * - The first output ("x:0") will be backed by host memory.
   * - The second output ("y:0") will be backed by GPU memory.
   * FEEDS:
   * It is the responsibility of the caller to ensure that the memory of the fed
   * tensors will be correctly initialized and synchronized before it is
   * accessed by operations executed during the call to Session::RunCallable().
   * This is typically ensured by using the TensorFlow memory allocators
   * (Device::GetAllocator()) to create the Tensor to be fed.
   * Alternatively, for CUDA-enabled GPU devices, this typically means that the
   * operation that produced the contents of the tensor has completed, i.e., the
   * CUDA stream has been synchronized (e.g., via cuCtxSynchronize() or
   * cuStreamSynchronize()).
   * </pre>
   *
   * <code>map<string, string> feed_devices = 6;</code>
   */
  int getFeedDevicesCount();
  /**
   * <pre>
   * The Tensor objects fed in the callable and fetched from the callable
   * are expected to be backed by host (CPU) memory by default.
   * The options below allow changing that - feeding tensors backed by
   * device memory, or returning tensors that are backed by device memory.
   * The maps below map the name of a feed/fetch tensor (which appears in
   * 'feed' or 'fetch' fields above), to the fully qualified name of the device
   * owning the memory backing the contents of the tensor.
   * For example, creating a callable with the following options:
   * CallableOptions {
   *   feed: "a:0"
   *   feed: "b:0"
   *   fetch: "x:0"
   *   fetch: "y:0"
   *   feed_devices: {
   *     "a:0": "/job:localhost/replica:0/task:0/device:GPU:0"
   *   }
   *   fetch_devices: {
   *     "y:0": "/job:localhost/replica:0/task:0/device:GPU:0"
   *   }
   * }
   * means that the Callable expects:
   * - The first argument ("a:0") is a Tensor backed by GPU memory.
   * - The second argument ("b:0") is a Tensor backed by host memory.
   * and of its return values:
   * - The first output ("x:0") will be backed by host memory.
   * - The second output ("y:0") will be backed by GPU memory.
   * FEEDS:
   * It is the responsibility of the caller to ensure that the memory of the fed
   * tensors will be correctly initialized and synchronized before it is
   * accessed by operations executed during the call to Session::RunCallable().
   * This is typically ensured by using the TensorFlow memory allocators
   * (Device::GetAllocator()) to create the Tensor to be fed.
   * Alternatively, for CUDA-enabled GPU devices, this typically means that the
   * operation that produced the contents of the tensor has completed, i.e., the
   * CUDA stream has been synchronized (e.g., via cuCtxSynchronize() or
   * cuStreamSynchronize()).
   * </pre>
   *
   * <code>map<string, string> feed_devices = 6;</code>
   */
  boolean containsFeedDevices(
      java.lang.String key);
  /**
   * Use {@link #getFeedDevicesMap()} instead.
   */
  @java.lang.Deprecated
  java.util.Map<java.lang.String, java.lang.String>
  getFeedDevices();
  /**
   * <pre>
   * The Tensor objects fed in the callable and fetched from the callable
   * are expected to be backed by host (CPU) memory by default.
   * The options below allow changing that - feeding tensors backed by
   * device memory, or returning tensors that are backed by device memory.
   * The maps below map the name of a feed/fetch tensor (which appears in
   * 'feed' or 'fetch' fields above), to the fully qualified name of the device
   * owning the memory backing the contents of the tensor.
   * For example, creating a callable with the following options:
   * CallableOptions {
   *   feed: "a:0"
   *   feed: "b:0"
   *   fetch: "x:0"
   *   fetch: "y:0"
   *   feed_devices: {
   *     "a:0": "/job:localhost/replica:0/task:0/device:GPU:0"
   *   }
   *   fetch_devices: {
   *     "y:0": "/job:localhost/replica:0/task:0/device:GPU:0"
   *   }
   * }
   * means that the Callable expects:
   * - The first argument ("a:0") is a Tensor backed by GPU memory.
   * - The second argument ("b:0") is a Tensor backed by host memory.
   * and of its return values:
   * - The first output ("x:0") will be backed by host memory.
   * - The second output ("y:0") will be backed by GPU memory.
   * FEEDS:
   * It is the responsibility of the caller to ensure that the memory of the fed
   * tensors will be correctly initialized and synchronized before it is
   * accessed by operations executed during the call to Session::RunCallable().
   * This is typically ensured by using the TensorFlow memory allocators
   * (Device::GetAllocator()) to create the Tensor to be fed.
   * Alternatively, for CUDA-enabled GPU devices, this typically means that the
   * operation that produced the contents of the tensor has completed, i.e., the
   * CUDA stream has been synchronized (e.g., via cuCtxSynchronize() or
   * cuStreamSynchronize()).
   * </pre>
   *
   * <code>map<string, string> feed_devices = 6;</code>
   */
  java.util.Map<java.lang.String, java.lang.String>
  getFeedDevicesMap();
  /**
   * <pre>
   * The Tensor objects fed in the callable and fetched from the callable
   * are expected to be backed by host (CPU) memory by default.
   * The options below allow changing that - feeding tensors backed by
   * device memory, or returning tensors that are backed by device memory.
   * The maps below map the name of a feed/fetch tensor (which appears in
   * 'feed' or 'fetch' fields above), to the fully qualified name of the device
   * owning the memory backing the contents of the tensor.
   * For example, creating a callable with the following options:
   * CallableOptions {
   *   feed: "a:0"
   *   feed: "b:0"
   *   fetch: "x:0"
   *   fetch: "y:0"
   *   feed_devices: {
   *     "a:0": "/job:localhost/replica:0/task:0/device:GPU:0"
   *   }
   *   fetch_devices: {
   *     "y:0": "/job:localhost/replica:0/task:0/device:GPU:0"
   *   }
   * }
   * means that the Callable expects:
   * - The first argument ("a:0") is a Tensor backed by GPU memory.
   * - The second argument ("b:0") is a Tensor backed by host memory.
   * and of its return values:
   * - The first output ("x:0") will be backed by host memory.
   * - The second output ("y:0") will be backed by GPU memory.
   * FEEDS:
   * It is the responsibility of the caller to ensure that the memory of the fed
   * tensors will be correctly initialized and synchronized before it is
   * accessed by operations executed during the call to Session::RunCallable().
   * This is typically ensured by using the TensorFlow memory allocators
   * (Device::GetAllocator()) to create the Tensor to be fed.
   * Alternatively, for CUDA-enabled GPU devices, this typically means that the
   * operation that produced the contents of the tensor has completed, i.e., the
   * CUDA stream has been synchronized (e.g., via cuCtxSynchronize() or
   * cuStreamSynchronize()).
   * </pre>
   *
   * <code>map<string, string> feed_devices = 6;</code>
   */
  java.lang.String getFeedDevicesOrDefault(
      java.lang.String key,
      java.lang.String defaultValue);
  /**
   * <pre>
   * The Tensor objects fed in the callable and fetched from the callable
   * are expected to be backed by host (CPU) memory by default.
   * The options below allow changing that - feeding tensors backed by
   * device memory, or returning tensors that are backed by device memory.
   * The maps below map the name of a feed/fetch tensor (which appears in
   * 'feed' or 'fetch' fields above), to the fully qualified name of the device
   * owning the memory backing the contents of the tensor.
   * For example, creating a callable with the following options:
   * CallableOptions {
   *   feed: "a:0"
   *   feed: "b:0"
   *   fetch: "x:0"
   *   fetch: "y:0"
   *   feed_devices: {
   *     "a:0": "/job:localhost/replica:0/task:0/device:GPU:0"
   *   }
   *   fetch_devices: {
   *     "y:0": "/job:localhost/replica:0/task:0/device:GPU:0"
   *   }
   * }
   * means that the Callable expects:
   * - The first argument ("a:0") is a Tensor backed by GPU memory.
   * - The second argument ("b:0") is a Tensor backed by host memory.
   * and of its return values:
   * - The first output ("x:0") will be backed by host memory.
   * - The second output ("y:0") will be backed by GPU memory.
   * FEEDS:
   * It is the responsibility of the caller to ensure that the memory of the fed
   * tensors will be correctly initialized and synchronized before it is
   * accessed by operations executed during the call to Session::RunCallable().
   * This is typically ensured by using the TensorFlow memory allocators
   * (Device::GetAllocator()) to create the Tensor to be fed.
   * Alternatively, for CUDA-enabled GPU devices, this typically means that the
   * operation that produced the contents of the tensor has completed, i.e., the
   * CUDA stream has been synchronized (e.g., via cuCtxSynchronize() or
   * cuStreamSynchronize()).
   * </pre>
   *
   * <code>map<string, string> feed_devices = 6;</code>
   */
  java.lang.String getFeedDevicesOrThrow(
      java.lang.String key);
  /**
   * <code>map<string, string> fetch_devices = 7;</code>
   */
  int getFetchDevicesCount();
  /**
   * <code>map<string, string> fetch_devices = 7;</code>
   */
  boolean containsFetchDevices(
      java.lang.String key);
  /**
   * Use {@link #getFetchDevicesMap()} instead.
   */
  @java.lang.Deprecated
  java.util.Map<java.lang.String, java.lang.String>
  getFetchDevices();
  /**
   * <code>map<string, string> fetch_devices = 7;</code>
   */
  java.util.Map<java.lang.String, java.lang.String>
  getFetchDevicesMap();
  /**
   * <code>map<string, string> fetch_devices = 7;</code>
   */
  java.lang.String getFetchDevicesOrDefault(
      java.lang.String key,
      java.lang.String defaultValue);
  /**
   * <code>map<string, string> fetch_devices = 7;</code>
   */
  java.lang.String getFetchDevicesOrThrow(
      java.lang.String key);
  /**
   * <pre>
   * By default, RunCallable() will synchronize the GPU stream before returning
   * fetched tensors on a GPU device, to ensure that the values in those tensors
   * have been produced. This simplifies interacting with the tensors, but
   * potentially incurs a performance hit.
   * If this option is set to true, the caller is responsible for ensuring
   * that the values in the fetched tensors have been produced before they are
   * used. The caller can do this by invoking `Device::Sync()` on the underlying
   * device(s), or by feeding the tensors back to the same Session using
   * `feed_devices` with the same corresponding device name.
   * </pre>
   *
   * <code>bool fetch_skip_sync = 8;</code>
   */
  boolean getFetchSkipSync();
}
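
A minimal usage sketch, not part of the generated source above: it assumes the CallableOptions message class that protoc generates alongside this interface from config.proto, and uses placeholder tensor names ("a:0", "b:0", "x:0", "y:0") and a localhost GPU device string that mirror the example in the feed_devices comment.

// Illustrative sketch only. Tensor and device names are placeholders and
// assume a graph that actually defines them.
import org.tensorflow.framework.CallableOptions;
import org.tensorflow.framework.CallableOptionsOrBuilder;
import org.tensorflow.framework.RunOptions;

public class CallableOptionsExample {
  public static void main(String[] args) {
    // Build a CallableOptions message with the protoc-generated builder.
    CallableOptions opts = CallableOptions.newBuilder()
        .addFeed("a:0")                       // fed tensors (host memory by default)
        .addFeed("b:0")
        .addFetch("x:0")                      // fetched tensors
        .addFetch("y:0")
        // Feed "a:0" from, and return "y:0" in, GPU device memory.
        .putFeedDevices("a:0", "/job:localhost/replica:0/task:0/device:GPU:0")
        .putFetchDevices("y:0", "/job:localhost/replica:0/task:0/device:GPU:0")
        .setRunOptions(RunOptions.getDefaultInstance())
        .setFetchSkipSync(false)              // keep the default GPU-stream sync
        .build();

    // The built message implements this OrBuilder interface, so the read-only
    // accessors declared above can be used directly.
    CallableOptionsOrBuilder ob = opts;
    for (int i = 0; i < ob.getFeedCount(); i++) {
      System.out.println("feed: " + ob.getFeed(i));
    }
    System.out.println("device for a:0 = "
        + ob.getFeedDevicesOrDefault("a:0", "host (CPU) memory"));
  }
}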