// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: tensorflow/core/protobuf/config.proto
package org.tensorflow.framework;
/**
*
* Defines a subgraph in another `GraphDef` as a set of feed points and nodes
* to be fetched or executed.
* Compare with the arguments to `Session::Run()`.
*
*
* Protobuf type {@code tensorflow.CallableOptions}
*/
public final class CallableOptions extends
com.google.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:tensorflow.CallableOptions)
CallableOptionsOrBuilder {
private static final long serialVersionUID = 0L;
// Use CallableOptions.newBuilder() to construct.
private CallableOptions(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private CallableOptions() {
feed_ = com.google.protobuf.LazyStringArrayList.EMPTY;
fetch_ = com.google.protobuf.LazyStringArrayList.EMPTY;
target_ = com.google.protobuf.LazyStringArrayList.EMPTY;
tensorConnection_ = java.util.Collections.emptyList();
fetchSkipSync_ = false;
}
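// Illustrative sketch, not part of the generated file: building a
// CallableOptions that mirrors the Session::Run()-style arguments described in
// the class comment. The tensor and node names ("a:0", "x:0", "init_op") are
// placeholders, not names taken from any real graph.
//
//   org.tensorflow.framework.CallableOptions opts =
//       org.tensorflow.framework.CallableOptions.newBuilder()
//           .addFeed("a:0")        // tensor supplied by the caller
//           .addFetch("x:0")       // tensor returned to the caller
//           .addTarget("init_op")  // node run only for its side effects
//           .build();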
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private CallableOptions(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownFieldProto3(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
java.lang.String s = input.readStringRequireUtf8();
if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
feed_ = new com.google.protobuf.LazyStringArrayList();
mutable_bitField0_ |= 0x00000001;
}
feed_.add(s);
break;
}
case 18: {
java.lang.String s = input.readStringRequireUtf8();
if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
fetch_ = new com.google.protobuf.LazyStringArrayList();
mutable_bitField0_ |= 0x00000002;
}
fetch_.add(s);
break;
}
case 26: {
java.lang.String s = input.readStringRequireUtf8();
if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
target_ = new com.google.protobuf.LazyStringArrayList();
mutable_bitField0_ |= 0x00000004;
}
target_.add(s);
break;
}
case 34: {
org.tensorflow.framework.RunOptions.Builder subBuilder = null;
if (runOptions_ != null) {
subBuilder = runOptions_.toBuilder();
}
runOptions_ = input.readMessage(org.tensorflow.framework.RunOptions.parser(), extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(runOptions_);
runOptions_ = subBuilder.buildPartial();
}
break;
}
case 42: {
if (!((mutable_bitField0_ & 0x00000010) == 0x00000010)) {
tensorConnection_ = new java.util.ArrayList<org.tensorflow.framework.TensorConnection>();
mutable_bitField0_ |= 0x00000010;
}
tensorConnection_.add(
input.readMessage(org.tensorflow.framework.TensorConnection.parser(), extensionRegistry));
break;
}
case 50: {
if (!((mutable_bitField0_ & 0x00000020) == 0x00000020)) {
feedDevices_ = com.google.protobuf.MapField.newMapField(
FeedDevicesDefaultEntryHolder.defaultEntry);
mutable_bitField0_ |= 0x00000020;
}
com.google.protobuf.MapEntry<java.lang.String, java.lang.String>
feedDevices__ = input.readMessage(
FeedDevicesDefaultEntryHolder.defaultEntry.getParserForType(), extensionRegistry);
feedDevices_.getMutableMap().put(
feedDevices__.getKey(), feedDevices__.getValue());
break;
}
case 58: {
if (!((mutable_bitField0_ & 0x00000040) == 0x00000040)) {
fetchDevices_ = com.google.protobuf.MapField.newMapField(
FetchDevicesDefaultEntryHolder.defaultEntry);
mutable_bitField0_ |= 0x00000040;
}
com.google.protobuf.MapEntry<java.lang.String, java.lang.String>
fetchDevices__ = input.readMessage(
FetchDevicesDefaultEntryHolder.defaultEntry.getParserForType(), extensionRegistry);
fetchDevices_.getMutableMap().put(
fetchDevices__.getKey(), fetchDevices__.getValue());
break;
}
case 64: {
fetchSkipSync_ = input.readBool();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
feed_ = feed_.getUnmodifiableView();
}
if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
fetch_ = fetch_.getUnmodifiableView();
}
if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
target_ = target_.getUnmodifiableView();
}
if (((mutable_bitField0_ & 0x00000010) == 0x00000010)) {
tensorConnection_ = java.util.Collections.unmodifiableList(tensorConnection_);
}
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_CallableOptions_descriptor;
}
@SuppressWarnings({"rawtypes"})
protected com.google.protobuf.MapField internalGetMapField(
int number) {
switch (number) {
case 6:
return internalGetFeedDevices();
case 7:
return internalGetFetchDevices();
default:
throw new RuntimeException(
"Invalid map field number: " + number);
}
}
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_CallableOptions_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.tensorflow.framework.CallableOptions.class, org.tensorflow.framework.CallableOptions.Builder.class);
}
private int bitField0_;
public static final int FEED_FIELD_NUMBER = 1;
private com.google.protobuf.LazyStringList feed_;
/**
*
* Tensors to be fed in the callable. Each feed is the name of a tensor.
*
*
* repeated string feed = 1;
*/
public com.google.protobuf.ProtocolStringList
getFeedList() {
return feed_;
}
/**
*
* Tensors to be fed in the callable. Each feed is the name of a tensor.
*
*
* repeated string feed = 1;
*/
public int getFeedCount() {
return feed_.size();
}
/**
*
* Tensors to be fed in the callable. Each feed is the name of a tensor.
*
*
* repeated string feed = 1;
*/
public java.lang.String getFeed(int index) {
return feed_.get(index);
}
/**
*
* Tensors to be fed in the callable. Each feed is the name of a tensor.
*
*
* repeated string feed = 1;
*/
public com.google.protobuf.ByteString
getFeedBytes(int index) {
return feed_.getByteString(index);
}
public static final int FETCH_FIELD_NUMBER = 2;
private com.google.protobuf.LazyStringList fetch_;
/**
*
* Fetches. A list of tensor names. The caller of the callable expects a
* tensor to be returned for each fetch[i] (see RunStepResponse.tensor). The
* order of specified fetches does not change the execution order.
*
*
* repeated string fetch = 2;
*/
public com.google.protobuf.ProtocolStringList
getFetchList() {
return fetch_;
}
/**
*
* Fetches. A list of tensor names. The caller of the callable expects a
* tensor to be returned for each fetch[i] (see RunStepResponse.tensor). The
* order of specified fetches does not change the execution order.
*
*
* repeated string fetch = 2;
*/
public int getFetchCount() {
return fetch_.size();
}
/**
*
* Fetches. A list of tensor names. The caller of the callable expects a
* tensor to be returned for each fetch[i] (see RunStepResponse.tensor). The
* order of specified fetches does not change the execution order.
*
*
* repeated string fetch = 2;
*/
public java.lang.String getFetch(int index) {
return fetch_.get(index);
}
/**
*
* Fetches. A list of tensor names. The caller of the callable expects a
* tensor to be returned for each fetch[i] (see RunStepResponse.tensor). The
* order of specified fetches does not change the execution order.
*
*
* repeated string fetch = 2;
*/
public com.google.protobuf.ByteString
getFetchBytes(int index) {
return fetch_.getByteString(index);
}
public static final int TARGET_FIELD_NUMBER = 3;
private com.google.protobuf.LazyStringList target_;
/**
*
* Target Nodes. A list of node names. The named nodes will be run by the
* callable but their outputs will not be returned.
*
*
* repeated string target = 3;
*/
public com.google.protobuf.ProtocolStringList
getTargetList() {
return target_;
}
/**
*
* Target Nodes. A list of node names. The named nodes will be run by the
* callable but their outputs will not be returned.
*
*
* repeated string target = 3;
*/
public int getTargetCount() {
return target_.size();
}
/**
*
* Target Nodes. A list of node names. The named nodes will be run by the
* callable but their outputs will not be returned.
*
*
* repeated string target = 3;
*/
public java.lang.String getTarget(int index) {
return target_.get(index);
}
/**
*
* Target Nodes. A list of node names. The named nodes will be run by the
* callable but their outputs will not be returned.
*
*
* repeated string target = 3;
*/
public com.google.protobuf.ByteString
getTargetBytes(int index) {
return target_.getByteString(index);
}
public static final int RUN_OPTIONS_FIELD_NUMBER = 4;
private org.tensorflow.framework.RunOptions runOptions_;
/**
*
* Options that will be applied to each run.
*
*
* .tensorflow.RunOptions run_options = 4;
*/
public boolean hasRunOptions() {
return runOptions_ != null;
}
/**
*
* Options that will be applied to each run.
*
*
* .tensorflow.RunOptions run_options = 4;
*/
public org.tensorflow.framework.RunOptions getRunOptions() {
return runOptions_ == null ? org.tensorflow.framework.RunOptions.getDefaultInstance() : runOptions_;
}
/**
*
* Options that will be applied to each run.
*
*
* .tensorflow.RunOptions run_options = 4;
*/
public org.tensorflow.framework.RunOptionsOrBuilder getRunOptionsOrBuilder() {
return getRunOptions();
}
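// Illustrative sketch: attaching a RunOptions message so that every invocation
// of the callable is traced. RunOptions.newBuilder() comes from the companion
// generated class; the TraceLevel enum and its FULL_TRACE value are assumed
// from the RunOptions definition in config.proto.
//
//   org.tensorflow.framework.CallableOptions.newBuilder()
//       .setRunOptions(
//           org.tensorflow.framework.RunOptions.newBuilder()
//               .setTraceLevel(
//                   org.tensorflow.framework.RunOptions.TraceLevel.FULL_TRACE)
//               .build())
//       .build();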
public static final int TENSOR_CONNECTION_FIELD_NUMBER = 5;
private java.util.List<org.tensorflow.framework.TensorConnection> tensorConnection_;
/**
*
* Tensors to be connected in the callable. Each TensorConnection denotes
* a pair of tensors in the graph, between which an edge will be created
* in the callable.
*
*
* repeated .tensorflow.TensorConnection tensor_connection = 5;
*/
public java.util.List<org.tensorflow.framework.TensorConnection> getTensorConnectionList() {
return tensorConnection_;
}
/**
*
* Tensors to be connected in the callable. Each TensorConnection denotes
* a pair of tensors in the graph, between which an edge will be created
* in the callable.
*
*
* repeated .tensorflow.TensorConnection tensor_connection = 5;
*/
public java.util.List<? extends org.tensorflow.framework.TensorConnectionOrBuilder>
getTensorConnectionOrBuilderList() {
return tensorConnection_;
}
/**
*
* Tensors to be connected in the callable. Each TensorConnection denotes
* a pair of tensors in the graph, between which an edge will be created
* in the callable.
*
*
* repeated .tensorflow.TensorConnection tensor_connection = 5;
*/
public int getTensorConnectionCount() {
return tensorConnection_.size();
}
/**
*
* Tensors to be connected in the callable. Each TensorConnection denotes
* a pair of tensors in the graph, between which an edge will be created
* in the callable.
*
*
* repeated .tensorflow.TensorConnection tensor_connection = 5;
*/
public org.tensorflow.framework.TensorConnection getTensorConnection(int index) {
return tensorConnection_.get(index);
}
/**
*
* Tensors to be connected in the callable. Each TensorConnection denotes
* a pair of tensors in the graph, between which an edge will be created
* in the callable.
*
*
* repeated .tensorflow.TensorConnection tensor_connection = 5;
*/
public org.tensorflow.framework.TensorConnectionOrBuilder getTensorConnectionOrBuilder(
int index) {
return tensorConnection_.get(index);
}
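// Illustrative sketch: wiring one tensor to another inside the callable. The
// setFromTensor/setToTensor builder methods are assumed from TensorConnection's
// from_tensor and to_tensor fields in config.proto; the names are placeholders.
//
//   org.tensorflow.framework.TensorConnection conn =
//       org.tensorflow.framework.TensorConnection.newBuilder()
//           .setFromTensor("y:0")  // producer already present in the graph
//           .setToTensor("a:0")    // consumer that will read y:0 instead
//           .build();
//   org.tensorflow.framework.CallableOptions.newBuilder()
//       .addTensorConnection(conn)
//       .build();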
public static final int FEED_DEVICES_FIELD_NUMBER = 6;
private static final class FeedDevicesDefaultEntryHolder {
static final com.google.protobuf.MapEntry<
java.lang.String, java.lang.String> defaultEntry =
com.google.protobuf.MapEntry
.<java.lang.String, java.lang.String>newDefaultInstance(
org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_CallableOptions_FeedDevicesEntry_descriptor,
com.google.protobuf.WireFormat.FieldType.STRING,
"",
com.google.protobuf.WireFormat.FieldType.STRING,
"");
}
private com.google.protobuf.MapField<
java.lang.String, java.lang.String> feedDevices_;
private com.google.protobuf.MapField<java.lang.String, java.lang.String>
internalGetFeedDevices() {
if (feedDevices_ == null) {
return com.google.protobuf.MapField.emptyMapField(
FeedDevicesDefaultEntryHolder.defaultEntry);
}
return feedDevices_;
}
public int getFeedDevicesCount() {
return internalGetFeedDevices().getMap().size();
}
/**
*
* The Tensor objects fed in the callable and fetched from the callable
* are expected to be backed by host (CPU) memory by default.
* The options below allow changing that - feeding tensors backed by
* device memory, or returning tensors that are backed by device memory.
* The maps below map the name of a feed/fetch tensor (which appears in
* 'feed' or 'fetch' fields above), to the fully qualified name of the device
* owning the memory backing the contents of the tensor.
* For example, creating a callable with the following options:
* CallableOptions {
* feed: "a:0"
* feed: "b:0"
* fetch: "x:0"
* fetch: "y:0"
* feed_devices: {
* "a:0": "/job:localhost/replica:0/task:0/device:GPU:0"
* }
* fetch_devices: {
* "y:0": "/job:localhost/replica:0/task:0/device:GPU:0"
* }
* }
* means that the Callable expects:
* - The first argument ("a:0") is a Tensor backed by GPU memory.
* - The second argument ("b:0") is a Tensor backed by host memory.
* and of its return values:
* - The first output ("x:0") will be backed by host memory.
* - The second output ("y:0") will be backed by GPU memory.
* FEEDS:
* It is the responsibility of the caller to ensure that the memory of the fed
* tensors will be correctly initialized and synchronized before it is
* accessed by operations executed during the call to Session::RunCallable().
* This is typically ensured by using the TensorFlow memory allocators
* (Device::GetAllocator()) to create the Tensor to be fed.
* Alternatively, for CUDA-enabled GPU devices, this typically means that the
* operation that produced the contents of the tensor has completed, i.e., the
* CUDA stream has been synchronized (e.g., via cuCtxSynchronize() or
* cuStreamSynchronize()).
*
*
* map<string, string> feed_devices = 6;
*/
public boolean containsFeedDevices(
java.lang.String key) {
if (key == null) { throw new java.lang.NullPointerException(); }
return internalGetFeedDevices().getMap().containsKey(key);
}
/**
* Use {@link #getFeedDevicesMap()} instead.
*/
@java.lang.Deprecated
public java.util.Map<java.lang.String, java.lang.String> getFeedDevices() {
return getFeedDevicesMap();
}
/**
*
* The Tensor objects fed in the callable and fetched from the callable
* are expected to be backed by host (CPU) memory by default.
* The options below allow changing that - feeding tensors backed by
* device memory, or returning tensors that are backed by device memory.
* The maps below map the name of a feed/fetch tensor (which appears in
* 'feed' or 'fetch' fields above), to the fully qualified name of the device
* owning the memory backing the contents of the tensor.
* For example, creating a callable with the following options:
* CallableOptions {
* feed: "a:0"
* feed: "b:0"
* fetch: "x:0"
* fetch: "y:0"
* feed_devices: {
* "a:0": "/job:localhost/replica:0/task:0/device:GPU:0"
* }
* fetch_devices: {
* "y:0": "/job:localhost/replica:0/task:0/device:GPU:0"
* }
* }
* means that the Callable expects:
* - The first argument ("a:0") is a Tensor backed by GPU memory.
* - The second argument ("b:0") is a Tensor backed by host memory.
* and of its return values:
* - The first output ("x:0") will be backed by host memory.
* - The second output ("y:0") will be backed by GPU memory.
* FEEDS:
* It is the responsibility of the caller to ensure that the memory of the fed
* tensors will be correctly initialized and synchronized before it is
* accessed by operations executed during the call to Session::RunCallable().
* This is typically ensured by using the TensorFlow memory allocators
* (Device::GetAllocator()) to create the Tensor to be fed.
* Alternatively, for CUDA-enabled GPU devices, this typically means that the
* operation that produced the contents of the tensor has completed, i.e., the
* CUDA stream has been synchronized (e.g., via cuCtxSynchronize() or
* cuStreamSynchronize()).
*
*
* map<string, string> feed_devices = 6;
*/
public java.util.Map<java.lang.String, java.lang.String> getFeedDevicesMap() {
return internalGetFeedDevices().getMap();
}
/**
*
* The Tensor objects fed in the callable and fetched from the callable
* are expected to be backed by host (CPU) memory by default.
* The options below allow changing that - feeding tensors backed by
* device memory, or returning tensors that are backed by device memory.
* The maps below map the name of a feed/fetch tensor (which appears in
* 'feed' or 'fetch' fields above), to the fully qualified name of the device
* owning the memory backing the contents of the tensor.
* For example, creating a callable with the following options:
* CallableOptions {
* feed: "a:0"
* feed: "b:0"
* fetch: "x:0"
* fetch: "y:0"
* feed_devices: {
* "a:0": "/job:localhost/replica:0/task:0/device:GPU:0"
* }
* fetch_devices: {
* "y:0": "/job:localhost/replica:0/task:0/device:GPU:0"
* }
* }
* means that the Callable expects:
* - The first argument ("a:0") is a Tensor backed by GPU memory.
* - The second argument ("b:0") is a Tensor backed by host memory.
* and of its return values:
* - The first output ("x:0") will be backed by host memory.
* - The second output ("y:0") will be backed by GPU memory.
* FEEDS:
* It is the responsibility of the caller to ensure that the memory of the fed
* tensors will be correctly initialized and synchronized before it is
* accessed by operations executed during the call to Session::RunCallable().
* This is typically ensured by using the TensorFlow memory allocators
* (Device::GetAllocator()) to create the Tensor to be fed.
* Alternatively, for CUDA-enabled GPU devices, this typically means that the
* operation that produced the contents of the tensor has completed, i.e., the
* CUDA stream has been synchronized (e.g., via cuCtxSynchronize() or
* cuStreamSynchronize()).
*
*
* map<string, string> feed_devices = 6;
*/
public java.lang.String getFeedDevicesOrDefault(
java.lang.String key,
java.lang.String defaultValue) {
if (key == null) { throw new java.lang.NullPointerException(); }
java.util.Map<java.lang.String, java.lang.String> map =
internalGetFeedDevices().getMap();
return map.containsKey(key) ? map.get(key) : defaultValue;
}
/**
*
* The Tensor objects fed in the callable and fetched from the callable
* are expected to be backed by host (CPU) memory by default.
* The options below allow changing that - feeding tensors backed by
* device memory, or returning tensors that are backed by device memory.
* The maps below map the name of a feed/fetch tensor (which appears in
* 'feed' or 'fetch' fields above), to the fully qualified name of the device
* owning the memory backing the contents of the tensor.
* For example, creating a callable with the following options:
* CallableOptions {
* feed: "a:0"
* feed: "b:0"
* fetch: "x:0"
* fetch: "y:0"
* feed_devices: {
* "a:0": "/job:localhost/replica:0/task:0/device:GPU:0"
* }
* fetch_devices: {
* "y:0": "/job:localhost/replica:0/task:0/device:GPU:0"
* }
* }
* means that the Callable expects:
* - The first argument ("a:0") is a Tensor backed by GPU memory.
* - The second argument ("b:0") is a Tensor backed by host memory.
* and of its return values:
* - The first output ("x:0") will be backed by host memory.
* - The second output ("y:0") will be backed by GPU memory.
* FEEDS:
* It is the responsibility of the caller to ensure that the memory of the fed
* tensors will be correctly initialized and synchronized before it is
* accessed by operations executed during the call to Session::RunCallable().
* This is typically ensured by using the TensorFlow memory allocators
* (Device::GetAllocator()) to create the Tensor to be fed.
* Alternatively, for CUDA-enabled GPU devices, this typically means that the
* operation that produced the contents of the tensor has completed, i.e., the
* CUDA stream has been synchronized (e.g., via cuCtxSynchronize() or
* cuStreamSynchronize()).
*
*
* map<string, string> feed_devices = 6;
*/
public java.lang.String getFeedDevicesOrThrow(
java.lang.String key) {
if (key == null) { throw new java.lang.NullPointerException(); }
java.util.Map<java.lang.String, java.lang.String> map =
internalGetFeedDevices().getMap();
if (!map.containsKey(key)) {
throw new java.lang.IllegalArgumentException();
}
return map.get(key);
}
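// Illustrative sketch of the GPU-feed scenario spelled out in the comment
// above. The putFeedDevices(key, value) builder method is assumed from the
// standard protobuf codegen convention for map<string, string> fields; it is
// not shown in this excerpt.
//
//   org.tensorflow.framework.CallableOptions.newBuilder()
//       .addFeed("a:0")
//       .addFeed("b:0")
//       .addFetch("x:0")
//       .addFetch("y:0")
//       .putFeedDevices("a:0", "/job:localhost/replica:0/task:0/device:GPU:0")
//       .build();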
public static final int FETCH_DEVICES_FIELD_NUMBER = 7;
private static final class FetchDevicesDefaultEntryHolder {
static final com.google.protobuf.MapEntry<
java.lang.String, java.lang.String> defaultEntry =
com.google.protobuf.MapEntry
.<java.lang.String, java.lang.String>newDefaultInstance(
org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_CallableOptions_FetchDevicesEntry_descriptor,
com.google.protobuf.WireFormat.FieldType.STRING,
"",
com.google.protobuf.WireFormat.FieldType.STRING,
"");
}
private com.google.protobuf.MapField<
java.lang.String, java.lang.String> fetchDevices_;
private com.google.protobuf.MapField<java.lang.String, java.lang.String>
internalGetFetchDevices() {
if (fetchDevices_ == null) {
return com.google.protobuf.MapField.emptyMapField(
FetchDevicesDefaultEntryHolder.defaultEntry);
}
return fetchDevices_;
}
public int getFetchDevicesCount() {
return internalGetFetchDevices().getMap().size();
}
/**
* map<string, string> fetch_devices = 7;
*/
public boolean containsFetchDevices(
java.lang.String key) {
if (key == null) { throw new java.lang.NullPointerException(); }
return internalGetFetchDevices().getMap().containsKey(key);
}
/**
* Use {@link #getFetchDevicesMap()} instead.
*/
@java.lang.Deprecated
public java.util.Map<java.lang.String, java.lang.String> getFetchDevices() {
return getFetchDevicesMap();
}
/**
* map<string, string> fetch_devices = 7;
*/
public java.util.Map<java.lang.String, java.lang.String> getFetchDevicesMap() {
return internalGetFetchDevices().getMap();
}
/**
* map<string, string> fetch_devices = 7;
*/
public java.lang.String getFetchDevicesOrDefault(
java.lang.String key,
java.lang.String defaultValue) {
if (key == null) { throw new java.lang.NullPointerException(); }
java.util.Map<java.lang.String, java.lang.String> map =
internalGetFetchDevices().getMap();
return map.containsKey(key) ? map.get(key) : defaultValue;
}
/**
* map<string, string> fetch_devices = 7;
*/
public java.lang.String getFetchDevicesOrThrow(
java.lang.String key) {
if (key == null) { throw new java.lang.NullPointerException(); }
java.util.Map<java.lang.String, java.lang.String> map =
internalGetFetchDevices().getMap();
if (!map.containsKey(key)) {
throw new java.lang.IllegalArgumentException();
}
return map.get(key);
}
public static final int FETCH_SKIP_SYNC_FIELD_NUMBER = 8;
private boolean fetchSkipSync_;
/**
*
* By default, RunCallable() will synchronize the GPU stream before returning
* fetched tensors on a GPU device, to ensure that the values in those tensors
* have been produced. This simplifies interacting with the tensors, but
* potentially incurs a performance hit.
* If this option is set to true, the caller is responsible for ensuring
* that the values in the fetched tensors have been produced before they are
* used. The caller can do this by invoking `Device::Sync()` on the underlying
* device(s), or by feeding the tensors back to the same Session using
* `feed_devices` with the same corresponding device name.
*
*
* bool fetch_skip_sync = 8;
*/
public boolean getFetchSkipSync() {
return fetchSkipSync_;
}
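// Illustrative sketch: opting out of the GPU stream synchronization described
// above, which makes the caller responsible for synchronizing the device before
// reading the fetched tensors. setFetchSkipSync is defined in the Builder below;
// putFetchDevices is assumed from the protobuf map-field builder convention.
//
//   org.tensorflow.framework.CallableOptions.newBuilder()
//       .addFetch("y:0")
//       .putFetchDevices("y:0", "/job:localhost/replica:0/task:0/device:GPU:0")
//       .setFetchSkipSync(true)
//       .build();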
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
for (int i = 0; i < feed_.size(); i++) {
com.google.protobuf.GeneratedMessageV3.writeString(output, 1, feed_.getRaw(i));
}
for (int i = 0; i < fetch_.size(); i++) {
com.google.protobuf.GeneratedMessageV3.writeString(output, 2, fetch_.getRaw(i));
}
for (int i = 0; i < target_.size(); i++) {
com.google.protobuf.GeneratedMessageV3.writeString(output, 3, target_.getRaw(i));
}
if (runOptions_ != null) {
output.writeMessage(4, getRunOptions());
}
for (int i = 0; i < tensorConnection_.size(); i++) {
output.writeMessage(5, tensorConnection_.get(i));
}
com.google.protobuf.GeneratedMessageV3
.serializeStringMapTo(
output,
internalGetFeedDevices(),
FeedDevicesDefaultEntryHolder.defaultEntry,
6);
com.google.protobuf.GeneratedMessageV3
.serializeStringMapTo(
output,
internalGetFetchDevices(),
FetchDevicesDefaultEntryHolder.defaultEntry,
7);
if (fetchSkipSync_ != false) {
output.writeBool(8, fetchSkipSync_);
}
unknownFields.writeTo(output);
}
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
{
int dataSize = 0;
for (int i = 0; i < feed_.size(); i++) {
dataSize += computeStringSizeNoTag(feed_.getRaw(i));
}
size += dataSize;
size += 1 * getFeedList().size();
}
{
int dataSize = 0;
for (int i = 0; i < fetch_.size(); i++) {
dataSize += computeStringSizeNoTag(fetch_.getRaw(i));
}
size += dataSize;
size += 1 * getFetchList().size();
}
{
int dataSize = 0;
for (int i = 0; i < target_.size(); i++) {
dataSize += computeStringSizeNoTag(target_.getRaw(i));
}
size += dataSize;
size += 1 * getTargetList().size();
}
if (runOptions_ != null) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(4, getRunOptions());
}
for (int i = 0; i < tensorConnection_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(5, tensorConnection_.get(i));
}
for (java.util.Map.Entry<java.lang.String, java.lang.String> entry
: internalGetFeedDevices().getMap().entrySet()) {
com.google.protobuf.MapEntry<java.lang.String, java.lang.String>
feedDevices__ = FeedDevicesDefaultEntryHolder.defaultEntry.newBuilderForType()
.setKey(entry.getKey())
.setValue(entry.getValue())
.build();
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(6, feedDevices__);
}
for (java.util.Map.Entry<java.lang.String, java.lang.String> entry
: internalGetFetchDevices().getMap().entrySet()) {
com.google.protobuf.MapEntry<java.lang.String, java.lang.String>
fetchDevices__ = FetchDevicesDefaultEntryHolder.defaultEntry.newBuilderForType()
.setKey(entry.getKey())
.setValue(entry.getValue())
.build();
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(7, fetchDevices__);
}
if (fetchSkipSync_ != false) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(8, fetchSkipSync_);
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.tensorflow.framework.CallableOptions)) {
return super.equals(obj);
}
org.tensorflow.framework.CallableOptions other = (org.tensorflow.framework.CallableOptions) obj;
boolean result = true;
result = result && getFeedList()
.equals(other.getFeedList());
result = result && getFetchList()
.equals(other.getFetchList());
result = result && getTargetList()
.equals(other.getTargetList());
result = result && (hasRunOptions() == other.hasRunOptions());
if (hasRunOptions()) {
result = result && getRunOptions()
.equals(other.getRunOptions());
}
result = result && getTensorConnectionList()
.equals(other.getTensorConnectionList());
result = result && internalGetFeedDevices().equals(
other.internalGetFeedDevices());
result = result && internalGetFetchDevices().equals(
other.internalGetFetchDevices());
result = result && (getFetchSkipSync()
== other.getFetchSkipSync());
result = result && unknownFields.equals(other.unknownFields);
return result;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (getFeedCount() > 0) {
hash = (37 * hash) + FEED_FIELD_NUMBER;
hash = (53 * hash) + getFeedList().hashCode();
}
if (getFetchCount() > 0) {
hash = (37 * hash) + FETCH_FIELD_NUMBER;
hash = (53 * hash) + getFetchList().hashCode();
}
if (getTargetCount() > 0) {
hash = (37 * hash) + TARGET_FIELD_NUMBER;
hash = (53 * hash) + getTargetList().hashCode();
}
if (hasRunOptions()) {
hash = (37 * hash) + RUN_OPTIONS_FIELD_NUMBER;
hash = (53 * hash) + getRunOptions().hashCode();
}
if (getTensorConnectionCount() > 0) {
hash = (37 * hash) + TENSOR_CONNECTION_FIELD_NUMBER;
hash = (53 * hash) + getTensorConnectionList().hashCode();
}
if (!internalGetFeedDevices().getMap().isEmpty()) {
hash = (37 * hash) + FEED_DEVICES_FIELD_NUMBER;
hash = (53 * hash) + internalGetFeedDevices().hashCode();
}
if (!internalGetFetchDevices().getMap().isEmpty()) {
hash = (37 * hash) + FETCH_DEVICES_FIELD_NUMBER;
hash = (53 * hash) + internalGetFetchDevices().hashCode();
}
hash = (37 * hash) + FETCH_SKIP_SYNC_FIELD_NUMBER;
hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(
getFetchSkipSync());
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.tensorflow.framework.CallableOptions parseFrom(
java.nio.ByteBuffer data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.tensorflow.framework.CallableOptions parseFrom(
java.nio.ByteBuffer data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.tensorflow.framework.CallableOptions parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.tensorflow.framework.CallableOptions parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.tensorflow.framework.CallableOptions parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.tensorflow.framework.CallableOptions parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.tensorflow.framework.CallableOptions parseFrom(java.io.InputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.tensorflow.framework.CallableOptions parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.tensorflow.framework.CallableOptions parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.tensorflow.framework.CallableOptions parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.tensorflow.framework.CallableOptions parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.tensorflow.framework.CallableOptions parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
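// Illustrative sketch: round-tripping a CallableOptions through its wire
// format. toByteArray() is inherited from the protobuf MessageLite base class;
// parseFrom(byte[]) is one of the overloads declared above.
//
//   org.tensorflow.framework.CallableOptions original =
//       org.tensorflow.framework.CallableOptions.newBuilder()
//           .addFeed("a:0")
//           .addFetch("x:0")
//           .build();
//   byte[] wire = original.toByteArray();
//   org.tensorflow.framework.CallableOptions parsed =
//       org.tensorflow.framework.CallableOptions.parseFrom(wire);
//   // parsed.equals(original) is expected to hold.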
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.tensorflow.framework.CallableOptions prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
*
* Defines a subgraph in another `GraphDef` as a set of feed points and nodes
* to be fetched or executed.
* Compare with the arguments to `Session::Run()`.
*
*
* Protobuf type {@code tensorflow.CallableOptions}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:tensorflow.CallableOptions)
org.tensorflow.framework.CallableOptionsOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_CallableOptions_descriptor;
}
@SuppressWarnings({"rawtypes"})
protected com.google.protobuf.MapField internalGetMapField(
int number) {
switch (number) {
case 6:
return internalGetFeedDevices();
case 7:
return internalGetFetchDevices();
default:
throw new RuntimeException(
"Invalid map field number: " + number);
}
}
@SuppressWarnings({"rawtypes"})
protected com.google.protobuf.MapField internalGetMutableMapField(
int number) {
switch (number) {
case 6:
return internalGetMutableFeedDevices();
case 7:
return internalGetMutableFetchDevices();
default:
throw new RuntimeException(
"Invalid map field number: " + number);
}
}
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_CallableOptions_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.tensorflow.framework.CallableOptions.class, org.tensorflow.framework.CallableOptions.Builder.class);
}
// Construct using org.tensorflow.framework.CallableOptions.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
getTensorConnectionFieldBuilder();
}
}
public Builder clear() {
super.clear();
feed_ = com.google.protobuf.LazyStringArrayList.EMPTY;
bitField0_ = (bitField0_ & ~0x00000001);
fetch_ = com.google.protobuf.LazyStringArrayList.EMPTY;
bitField0_ = (bitField0_ & ~0x00000002);
target_ = com.google.protobuf.LazyStringArrayList.EMPTY;
bitField0_ = (bitField0_ & ~0x00000004);
if (runOptionsBuilder_ == null) {
runOptions_ = null;
} else {
runOptions_ = null;
runOptionsBuilder_ = null;
}
if (tensorConnectionBuilder_ == null) {
tensorConnection_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000010);
} else {
tensorConnectionBuilder_.clear();
}
internalGetMutableFeedDevices().clear();
internalGetMutableFetchDevices().clear();
fetchSkipSync_ = false;
return this;
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_CallableOptions_descriptor;
}
public org.tensorflow.framework.CallableOptions getDefaultInstanceForType() {
return org.tensorflow.framework.CallableOptions.getDefaultInstance();
}
public org.tensorflow.framework.CallableOptions build() {
org.tensorflow.framework.CallableOptions result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.tensorflow.framework.CallableOptions buildPartial() {
org.tensorflow.framework.CallableOptions result = new org.tensorflow.framework.CallableOptions(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
feed_ = feed_.getUnmodifiableView();
bitField0_ = (bitField0_ & ~0x00000001);
}
result.feed_ = feed_;
if (((bitField0_ & 0x00000002) == 0x00000002)) {
fetch_ = fetch_.getUnmodifiableView();
bitField0_ = (bitField0_ & ~0x00000002);
}
result.fetch_ = fetch_;
if (((bitField0_ & 0x00000004) == 0x00000004)) {
target_ = target_.getUnmodifiableView();
bitField0_ = (bitField0_ & ~0x00000004);
}
result.target_ = target_;
if (runOptionsBuilder_ == null) {
result.runOptions_ = runOptions_;
} else {
result.runOptions_ = runOptionsBuilder_.build();
}
if (tensorConnectionBuilder_ == null) {
if (((bitField0_ & 0x00000010) == 0x00000010)) {
tensorConnection_ = java.util.Collections.unmodifiableList(tensorConnection_);
bitField0_ = (bitField0_ & ~0x00000010);
}
result.tensorConnection_ = tensorConnection_;
} else {
result.tensorConnection_ = tensorConnectionBuilder_.build();
}
result.feedDevices_ = internalGetFeedDevices();
result.feedDevices_.makeImmutable();
result.fetchDevices_ = internalGetFetchDevices();
result.fetchDevices_.makeImmutable();
result.fetchSkipSync_ = fetchSkipSync_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder clone() {
return (Builder) super.clone();
}
public Builder setField(
com.google.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return (Builder) super.setField(field, value);
}
public Builder clearField(
com.google.protobuf.Descriptors.FieldDescriptor field) {
return (Builder) super.clearField(field);
}
public Builder clearOneof(
com.google.protobuf.Descriptors.OneofDescriptor oneof) {
return (Builder) super.clearOneof(oneof);
}
public Builder setRepeatedField(
com.google.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return (Builder) super.setRepeatedField(field, index, value);
}
public Builder addRepeatedField(
com.google.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return (Builder) super.addRepeatedField(field, value);
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.tensorflow.framework.CallableOptions) {
return mergeFrom((org.tensorflow.framework.CallableOptions)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.tensorflow.framework.CallableOptions other) {
if (other == org.tensorflow.framework.CallableOptions.getDefaultInstance()) return this;
if (!other.feed_.isEmpty()) {
if (feed_.isEmpty()) {
feed_ = other.feed_;
bitField0_ = (bitField0_ & ~0x00000001);
} else {
ensureFeedIsMutable();
feed_.addAll(other.feed_);
}
onChanged();
}
if (!other.fetch_.isEmpty()) {
if (fetch_.isEmpty()) {
fetch_ = other.fetch_;
bitField0_ = (bitField0_ & ~0x00000002);
} else {
ensureFetchIsMutable();
fetch_.addAll(other.fetch_);
}
onChanged();
}
if (!other.target_.isEmpty()) {
if (target_.isEmpty()) {
target_ = other.target_;
bitField0_ = (bitField0_ & ~0x00000004);
} else {
ensureTargetIsMutable();
target_.addAll(other.target_);
}
onChanged();
}
if (other.hasRunOptions()) {
mergeRunOptions(other.getRunOptions());
}
if (tensorConnectionBuilder_ == null) {
if (!other.tensorConnection_.isEmpty()) {
if (tensorConnection_.isEmpty()) {
tensorConnection_ = other.tensorConnection_;
bitField0_ = (bitField0_ & ~0x00000010);
} else {
ensureTensorConnectionIsMutable();
tensorConnection_.addAll(other.tensorConnection_);
}
onChanged();
}
} else {
if (!other.tensorConnection_.isEmpty()) {
if (tensorConnectionBuilder_.isEmpty()) {
tensorConnectionBuilder_.dispose();
tensorConnectionBuilder_ = null;
tensorConnection_ = other.tensorConnection_;
bitField0_ = (bitField0_ & ~0x00000010);
tensorConnectionBuilder_ =
com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
getTensorConnectionFieldBuilder() : null;
} else {
tensorConnectionBuilder_.addAllMessages(other.tensorConnection_);
}
}
}
internalGetMutableFeedDevices().mergeFrom(
other.internalGetFeedDevices());
internalGetMutableFetchDevices().mergeFrom(
other.internalGetFetchDevices());
if (other.getFetchSkipSync() != false) {
setFetchSkipSync(other.getFetchSkipSync());
}
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
public final boolean isInitialized() {
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.tensorflow.framework.CallableOptions parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.tensorflow.framework.CallableOptions) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
private com.google.protobuf.LazyStringList feed_ = com.google.protobuf.LazyStringArrayList.EMPTY;
private void ensureFeedIsMutable() {
if (!((bitField0_ & 0x00000001) == 0x00000001)) {
feed_ = new com.google.protobuf.LazyStringArrayList(feed_);
bitField0_ |= 0x00000001;
}
}
/**
*
* Tensors to be fed in the callable. Each feed is the name of a tensor.
*
*
* repeated string feed = 1;
*/
public com.google.protobuf.ProtocolStringList
getFeedList() {
return feed_.getUnmodifiableView();
}
/**
*
* Tensors to be fed in the callable. Each feed is the name of a tensor.
*
*
* repeated string feed = 1;
*/
public int getFeedCount() {
return feed_.size();
}
/**
*
* Tensors to be fed in the callable. Each feed is the name of a tensor.
*
*
* repeated string feed = 1;
*/
public java.lang.String getFeed(int index) {
return feed_.get(index);
}
/**
*
* Tensors to be fed in the callable. Each feed is the name of a tensor.
*
*
* repeated string feed = 1;
*/
public com.google.protobuf.ByteString
getFeedBytes(int index) {
return feed_.getByteString(index);
}
/**
*
* Tensors to be fed in the callable. Each feed is the name of a tensor.
*
*
* repeated string feed = 1;
*/
public Builder setFeed(
int index, java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
ensureFeedIsMutable();
feed_.set(index, value);
onChanged();
return this;
}
/**
*
* Tensors to be fed in the callable. Each feed is the name of a tensor.
*
*
* repeated string feed = 1;
*/
public Builder addFeed(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
ensureFeedIsMutable();
feed_.add(value);
onChanged();
return this;
}
/**
*
* Tensors to be fed in the callable. Each feed is the name of a tensor.
*
*
* repeated string feed = 1;
*/
public Builder addAllFeed(
java.lang.Iterable<java.lang.String> values) {
ensureFeedIsMutable();
com.google.protobuf.AbstractMessageLite.Builder.addAll(
values, feed_);
onChanged();
return this;
}
/**
*
* Tensors to be fed in the callable. Each feed is the name of a tensor.
*
*
* repeated string feed = 1;
*/
public Builder clearFeed() {
feed_ = com.google.protobuf.LazyStringArrayList.EMPTY;
bitField0_ = (bitField0_ & ~0x00000001);
onChanged();
return this;
}
/**
*
* Tensors to be fed in the callable. Each feed is the name of a tensor.
*
*
* repeated string feed = 1;
*/
public Builder addFeedBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
checkByteStringIsUtf8(value);
ensureFeedIsMutable();
feed_.add(value);
onChanged();
return this;
}
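// Illustrative sketch: the repeated-string feed accessors above (and the
// analogous fetch/target ones below) can be combined freely on one Builder.
// The names are placeholders.
//
//   org.tensorflow.framework.CallableOptions.Builder b =
//       org.tensorflow.framework.CallableOptions.newBuilder();
//   b.addFeed("a:0");
//   b.addAllFeed(java.util.Arrays.asList("b:0", "c:0"));
//   b.setFeed(0, "a_renamed:0");  // replace the first feed in place
//   b.clearFeed();                // drop all feeds again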
private com.google.protobuf.LazyStringList fetch_ = com.google.protobuf.LazyStringArrayList.EMPTY;
private void ensureFetchIsMutable() {
if (!((bitField0_ & 0x00000002) == 0x00000002)) {
fetch_ = new com.google.protobuf.LazyStringArrayList(fetch_);
bitField0_ |= 0x00000002;
}
}
/**
*
* Fetches. A list of tensor names. The caller of the callable expects a
* tensor to be returned for each fetch[i] (see RunStepResponse.tensor). The
* order of specified fetches does not change the execution order.
*
*
* repeated string fetch = 2;
*/
public com.google.protobuf.ProtocolStringList
getFetchList() {
return fetch_.getUnmodifiableView();
}
/**
*
* Fetches. A list of tensor names. The caller of the callable expects a
* tensor to be returned for each fetch[i] (see RunStepResponse.tensor). The
* order of specified fetches does not change the execution order.
*
*
* repeated string fetch = 2;
*/
public int getFetchCount() {
return fetch_.size();
}
/**
*
* Fetches. A list of tensor names. The caller of the callable expects a
* tensor to be returned for each fetch[i] (see RunStepResponse.tensor). The
* order of specified fetches does not change the execution order.
*
*
* repeated string fetch = 2;
*/
public java.lang.String getFetch(int index) {
return fetch_.get(index);
}
/**
*
* Fetches. A list of tensor names. The caller of the callable expects a
* tensor to be returned for each fetch[i] (see RunStepResponse.tensor). The
* order of specified fetches does not change the execution order.
*
*
* repeated string fetch = 2;
*/
public com.google.protobuf.ByteString
getFetchBytes(int index) {
return fetch_.getByteString(index);
}
/**
*
* Fetches. A list of tensor names. The caller of the callable expects a
* tensor to be returned for each fetch[i] (see RunStepResponse.tensor). The
* order of specified fetches does not change the execution order.
*
*
* repeated string fetch = 2;
*/
public Builder setFetch(
int index, java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
ensureFetchIsMutable();
fetch_.set(index, value);
onChanged();
return this;
}
/**
*
* Fetches. A list of tensor names. The caller of the callable expects a
* tensor to be returned for each fetch[i] (see RunStepResponse.tensor). The
* order of specified fetches does not change the execution order.
*
*
* repeated string fetch = 2;
*/
public Builder addFetch(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
ensureFetchIsMutable();
fetch_.add(value);
onChanged();
return this;
}
/**
*
* Fetches. A list of tensor names. The caller of the callable expects a
* tensor to be returned for each fetch[i] (see RunStepResponse.tensor). The
* order of specified fetches does not change the execution order.
*
*
* repeated string fetch = 2;
*/
public Builder addAllFetch(
java.lang.Iterable<java.lang.String> values) {
ensureFetchIsMutable();
com.google.protobuf.AbstractMessageLite.Builder.addAll(
values, fetch_);
onChanged();
return this;
}
/**
*
* Fetches. A list of tensor names. The caller of the callable expects a
* tensor to be returned for each fetch[i] (see RunStepResponse.tensor). The
* order of specified fetches does not change the execution order.
*
*
* repeated string fetch = 2;
*/
public Builder clearFetch() {
fetch_ = com.google.protobuf.LazyStringArrayList.EMPTY;
bitField0_ = (bitField0_ & ~0x00000002);
onChanged();
return this;
}
/**
*
* Fetches. A list of tensor names. The caller of the callable expects a
* tensor to be returned for each fetch[i] (see RunStepResponse.tensor). The
* order of specified fetches does not change the execution order.
*
*
* repeated string fetch = 2;
*/
public Builder addFetchBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
checkByteStringIsUtf8(value);
ensureFetchIsMutable();
fetch_.add(value);
onChanged();
return this;
}
private com.google.protobuf.LazyStringList target_ = com.google.protobuf.LazyStringArrayList.EMPTY;
private void ensureTargetIsMutable() {
if (!((bitField0_ & 0x00000004) == 0x00000004)) {
target_ = new com.google.protobuf.LazyStringArrayList(target_);
bitField0_ |= 0x00000004;
}
}
/**
*
* Target Nodes. A list of node names. The named nodes will be run by the
* callable but their outputs will not be returned.
*
*
* repeated string target = 3;
*/
public com.google.protobuf.ProtocolStringList
getTargetList() {
return target_.getUnmodifiableView();
}
/**
*
* Target Nodes. A list of node names. The named nodes will be run by the
* callable but their outputs will not be returned.
*
*
* repeated string target = 3;
*/
public int getTargetCount() {
return target_.size();
}
/**
*
* Target Nodes. A list of node names. The named nodes will be run by the
* callable but their outputs will not be returned.
*
*
* repeated string target = 3;
*/
public java.lang.String getTarget(int index) {
return target_.get(index);
}
/**
*
* Target Nodes. A list of node names. The named nodes will be run by the
* callable but their outputs will not be returned.
*
*
* repeated string target = 3;
*/
public com.google.protobuf.ByteString
getTargetBytes(int index) {
return target_.getByteString(index);
}
/**
*
* Target Nodes. A list of node names. The named nodes will be run by the
* callable but their outputs will not be returned.
*
*
* repeated string target = 3;
*/
public Builder setTarget(
int index, java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
ensureTargetIsMutable();
target_.set(index, value);
onChanged();
return this;
}
/**
*
* Target Nodes. A list of node names. The named nodes will be run by the
* callable but their outputs will not be returned.
*
*
* repeated string target = 3;
*/
public Builder addTarget(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
ensureTargetIsMutable();
target_.add(value);
onChanged();
return this;
}
/**
*
* Target Nodes. A list of node names. The named nodes will be run by the
* callable but their outputs will not be returned.
*
*
* repeated string target = 3;
*/
public Builder addAllTarget(
java.lang.Iterable<java.lang.String> values) {
ensureTargetIsMutable();
com.google.protobuf.AbstractMessageLite.Builder.addAll(
values, target_);
onChanged();
return this;
}
/**
*
* Target Nodes. A list of node names. The named nodes will be run by the
* callable but their outputs will not be returned.
*
*
* repeated string target = 3;
*/
public Builder clearTarget() {
target_ = com.google.protobuf.LazyStringArrayList.EMPTY;
bitField0_ = (bitField0_ & ~0x00000004);
onChanged();
return this;
}
/**
*
* Target Nodes. A list of node names. The named nodes will be run by the
* callable but their outputs will not be returned.
*
*
* repeated string target = 3;
*/
public Builder addTargetBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
checkByteStringIsUtf8(value);
ensureTargetIsMutable();
target_.add(value);
onChanged();
return this;
}
private org.tensorflow.framework.RunOptions runOptions_ = null;
private com.google.protobuf.SingleFieldBuilderV3<
org.tensorflow.framework.RunOptions, org.tensorflow.framework.RunOptions.Builder, org.tensorflow.framework.RunOptionsOrBuilder> runOptionsBuilder_;
/**
*
* Options that will be applied to each run.
*
*
* .tensorflow.RunOptions run_options = 4;
*/
public boolean hasRunOptions() {
return runOptionsBuilder_ != null || runOptions_ != null;
}
/**
*
* Options that will be applied to each run.
*
*
* .tensorflow.RunOptions run_options = 4;
*/
public org.tensorflow.framework.RunOptions getRunOptions() {
if (runOptionsBuilder_ == null) {
return runOptions_ == null ? org.tensorflow.framework.RunOptions.getDefaultInstance() : runOptions_;
} else {
return runOptionsBuilder_.getMessage();
}
}
/**
*
* Options that will be applied to each run.
*
*
* .tensorflow.RunOptions run_options = 4;
*/
public Builder setRunOptions(org.tensorflow.framework.RunOptions value) {
if (runOptionsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
runOptions_ = value;
onChanged();
} else {
runOptionsBuilder_.setMessage(value);
}
return this;
}
/**
*
* Options that will be applied to each run.
*
*
* .tensorflow.RunOptions run_options = 4;
*/
public Builder setRunOptions(
org.tensorflow.framework.RunOptions.Builder builderForValue) {
if (runOptionsBuilder_ == null) {
runOptions_ = builderForValue.build();
onChanged();
} else {
runOptionsBuilder_.setMessage(builderForValue.build());
}
return this;
}
/**
*
* Options that will be applied to each run.
*
*
* .tensorflow.RunOptions run_options = 4;
*/
public Builder mergeRunOptions(org.tensorflow.framework.RunOptions value) {
if (runOptionsBuilder_ == null) {
if (runOptions_ != null) {
runOptions_ =
org.tensorflow.framework.RunOptions.newBuilder(runOptions_).mergeFrom(value).buildPartial();
} else {
runOptions_ = value;
}
onChanged();
} else {
runOptionsBuilder_.mergeFrom(value);
}
return this;
}
/**
*
* Options that will be applied to each run.
*
*
* .tensorflow.RunOptions run_options = 4;
*/
public Builder clearRunOptions() {
if (runOptionsBuilder_ == null) {
runOptions_ = null;
onChanged();
} else {
runOptions_ = null;
runOptionsBuilder_ = null;
}
return this;
}
/**
*
* Options that will be applied to each run.
*
*
* .tensorflow.RunOptions run_options = 4;
*/
public org.tensorflow.framework.RunOptions.Builder getRunOptionsBuilder() {
onChanged();
return getRunOptionsFieldBuilder().getBuilder();
}
/**
*
* Options that will be applied to each run.
*
*
* .tensorflow.RunOptions run_options = 4;
*/
public org.tensorflow.framework.RunOptionsOrBuilder getRunOptionsOrBuilder() {
if (runOptionsBuilder_ != null) {
return runOptionsBuilder_.getMessageOrBuilder();
} else {
return runOptions_ == null ?
org.tensorflow.framework.RunOptions.getDefaultInstance() : runOptions_;
}
}
/**
*
* Options that will be applied to each run.
*
*
* .tensorflow.RunOptions run_options = 4;
*/
private com.google.protobuf.SingleFieldBuilderV3<
org.tensorflow.framework.RunOptions, org.tensorflow.framework.RunOptions.Builder, org.tensorflow.framework.RunOptionsOrBuilder>
getRunOptionsFieldBuilder() {
if (runOptionsBuilder_ == null) {
runOptionsBuilder_ = new com.google.protobuf.SingleFieldBuilderV3<
org.tensorflow.framework.RunOptions, org.tensorflow.framework.RunOptions.Builder, org.tensorflow.framework.RunOptionsOrBuilder>(
getRunOptions(),
getParentForChildren(),
isClean());
runOptions_ = null;
}
return runOptionsBuilder_;
}
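// Illustrative sketch: getRunOptionsBuilder() above lets the nested RunOptions
// be edited in place instead of building it separately and passing it to
// setRunOptions(). setOutputPartitionGraphs is assumed from the
// output_partition_graphs field of RunOptions in config.proto.
//
//   org.tensorflow.framework.CallableOptions.Builder b =
//       org.tensorflow.framework.CallableOptions.newBuilder();
//   b.getRunOptionsBuilder().setOutputPartitionGraphs(true);
//   org.tensorflow.framework.CallableOptions opts = b.build();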
private java.util.List<org.tensorflow.framework.TensorConnection> tensorConnection_ =
java.util.Collections.emptyList();
private void ensureTensorConnectionIsMutable() {
if (!((bitField0_ & 0x00000010) == 0x00000010)) {
tensorConnection_ = new java.util.ArrayList<org.tensorflow.framework.TensorConnection>(tensorConnection_);
bitField0_ |= 0x00000010;
}
}
private com.google.protobuf.RepeatedFieldBuilderV3<
org.tensorflow.framework.TensorConnection, org.tensorflow.framework.TensorConnection.Builder, org.tensorflow.framework.TensorConnectionOrBuilder> tensorConnectionBuilder_;
/**
*
* Tensors to be connected in the callable. Each TensorConnection denotes
* a pair of tensors in the graph, between which an edge will be created
* in the callable.
*
*
* repeated .tensorflow.TensorConnection tensor_connection = 5;
*/
public java.util.List<org.tensorflow.framework.TensorConnection> getTensorConnectionList() {
if (tensorConnectionBuilder_ == null) {
return java.util.Collections.unmodifiableList(tensorConnection_);
} else {
return tensorConnectionBuilder_.getMessageList();
}
}
/**
*
* Tensors to be connected in the callable. Each TensorConnection denotes
* a pair of tensors in the graph, between which an edge will be created
* in the callable.
*
*
* repeated .tensorflow.TensorConnection tensor_connection = 5;
*/
public int getTensorConnectionCount() {
if (tensorConnectionBuilder_ == null) {
return tensorConnection_.size();
} else {
return tensorConnectionBuilder_.getCount();
}
}
/**
*
* Tensors to be connected in the callable. Each TensorConnection denotes
* a pair of tensors in the graph, between which an edge will be created
* in the callable.
*
*
* repeated .tensorflow.TensorConnection tensor_connection = 5;
*/
public org.tensorflow.framework.TensorConnection getTensorConnection(int index) {
if (tensorConnectionBuilder_ == null) {
return tensorConnection_.get(index);
} else {
return tensorConnectionBuilder_.getMessage(index);
}
}
/**
*
* Tensors to be connected in the callable. Each TensorConnection denotes
* a pair of tensors in the graph, between which an edge will be created
* in the callable.
*
*
* repeated .tensorflow.TensorConnection tensor_connection = 5;
*/
public Builder setTensorConnection(
int index, org.tensorflow.framework.TensorConnection value) {
if (tensorConnectionBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureTensorConnectionIsMutable();
tensorConnection_.set(index, value);
onChanged();
} else {
tensorConnectionBuilder_.setMessage(index, value);
}
return this;
}
/**
*
* Tensors to be connected in the callable. Each TensorConnection denotes
* a pair of tensors in the graph, between which an edge will be created
* in the callable.
*
*
* repeated .tensorflow.TensorConnection tensor_connection = 5;
*/
public Builder setTensorConnection(
int index, org.tensorflow.framework.TensorConnection.Builder builderForValue) {
if (tensorConnectionBuilder_ == null) {
ensureTensorConnectionIsMutable();
tensorConnection_.set(index, builderForValue.build());
onChanged();
} else {
tensorConnectionBuilder_.setMessage(index, builderForValue.build());
}
return this;
}
/**
*
* Tensors to be connected in the callable. Each TensorConnection denotes
* a pair of tensors in the graph, between which an edge will be created
* in the callable.
*
*
* repeated .tensorflow.TensorConnection tensor_connection = 5;
*/
public Builder addTensorConnection(org.tensorflow.framework.TensorConnection value) {
if (tensorConnectionBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureTensorConnectionIsMutable();
tensorConnection_.add(value);
onChanged();
} else {
tensorConnectionBuilder_.addMessage(value);
}
return this;
}
/**
*
* Tensors to be connected in the callable. Each TensorConnection denotes
* a pair of tensors in the graph, between which an edge will be created
* in the callable.
*
*
* repeated .tensorflow.TensorConnection tensor_connection = 5;
*/
public Builder addTensorConnection(
int index, org.tensorflow.framework.TensorConnection value) {
if (tensorConnectionBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureTensorConnectionIsMutable();
tensorConnection_.add(index, value);
onChanged();
} else {
tensorConnectionBuilder_.addMessage(index, value);
}
return this;
}
/**
*
* Tensors to be connected in the callable. Each TensorConnection denotes
* a pair of tensors in the graph, between which an edge will be created
* in the callable.
*
*
* repeated .tensorflow.TensorConnection tensor_connection = 5;
*/
public Builder addTensorConnection(
org.tensorflow.framework.TensorConnection.Builder builderForValue) {
if (tensorConnectionBuilder_ == null) {
ensureTensorConnectionIsMutable();
tensorConnection_.add(builderForValue.build());
onChanged();
} else {
tensorConnectionBuilder_.addMessage(builderForValue.build());
}
return this;
}
/**
*
* Tensors to be connected in the callable. Each TensorConnection denotes
* a pair of tensors in the graph, between which an edge will be created
* in the callable.
*
*
* repeated .tensorflow.TensorConnection tensor_connection = 5;
*/
public Builder addTensorConnection(
int index, org.tensorflow.framework.TensorConnection.Builder builderForValue) {
if (tensorConnectionBuilder_ == null) {
ensureTensorConnectionIsMutable();
tensorConnection_.add(index, builderForValue.build());
onChanged();
} else {
tensorConnectionBuilder_.addMessage(index, builderForValue.build());
}
return this;
}
/**
*
* Tensors to be connected in the callable. Each TensorConnection denotes
* a pair of tensors in the graph, between which an edge will be created
* in the callable.
*
*
* repeated .tensorflow.TensorConnection tensor_connection = 5;
*/
public Builder addAllTensorConnection(
java.lang.Iterable<? extends org.tensorflow.framework.TensorConnection> values) {
if (tensorConnectionBuilder_ == null) {
ensureTensorConnectionIsMutable();
com.google.protobuf.AbstractMessageLite.Builder.addAll(
values, tensorConnection_);
onChanged();
} else {
tensorConnectionBuilder_.addAllMessages(values);
}
return this;
}
/**
*
* Tensors to be connected in the callable. Each TensorConnection denotes
* a pair of tensors in the graph, between which an edge will be created
* in the callable.
*
*
* repeated .tensorflow.TensorConnection tensor_connection = 5;
*/
public Builder clearTensorConnection() {
if (tensorConnectionBuilder_ == null) {
tensorConnection_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000010);
onChanged();
} else {
tensorConnectionBuilder_.clear();
}
return this;
}
/**
*
* Tensors to be connected in the callable. Each TensorConnection denotes
* a pair of tensors in the graph, between which an edge will be created
* in the callable.
*
*
* repeated .tensorflow.TensorConnection tensor_connection = 5;
*/
public Builder removeTensorConnection(int index) {
if (tensorConnectionBuilder_ == null) {
ensureTensorConnectionIsMutable();
tensorConnection_.remove(index);
onChanged();
} else {
tensorConnectionBuilder_.remove(index);
}
return this;
}
/**
*
* Tensors to be connected in the callable. Each TensorConnection denotes
* a pair of tensors in the graph, between which an edge will be created
* in the callable.
*
*
* repeated .tensorflow.TensorConnection tensor_connection = 5;
*/
public org.tensorflow.framework.TensorConnection.Builder getTensorConnectionBuilder(
int index) {
return getTensorConnectionFieldBuilder().getBuilder(index);
}
/**
*
* Tensors to be connected in the callable. Each TensorConnection denotes
* a pair of tensors in the graph, between which an edge will be created
* in the callable.
*
*
* repeated .tensorflow.TensorConnection tensor_connection = 5;
*/
public org.tensorflow.framework.TensorConnectionOrBuilder getTensorConnectionOrBuilder(
int index) {
if (tensorConnectionBuilder_ == null) {
return tensorConnection_.get(index); } else {
return tensorConnectionBuilder_.getMessageOrBuilder(index);
}
}
/**
*
* Tensors to be connected in the callable. Each TensorConnection denotes
* a pair of tensors in the graph, between which an edge will be created
* in the callable.
*
*
* repeated .tensorflow.TensorConnection tensor_connection = 5;
*/
public java.util.List<? extends org.tensorflow.framework.TensorConnectionOrBuilder>
getTensorConnectionOrBuilderList() {
if (tensorConnectionBuilder_ != null) {
return tensorConnectionBuilder_.getMessageOrBuilderList();
} else {
return java.util.Collections.unmodifiableList(tensorConnection_);
}
}
/**
*
* Tensors to be connected in the callable. Each TensorConnection denotes
* a pair of tensors in the graph, between which an edge will be created
* in the callable.
*
*
* repeated .tensorflow.TensorConnection tensor_connection = 5;
*/
public org.tensorflow.framework.TensorConnection.Builder addTensorConnectionBuilder() {
return getTensorConnectionFieldBuilder().addBuilder(
org.tensorflow.framework.TensorConnection.getDefaultInstance());
}
/**
*
* Tensors to be connected in the callable. Each TensorConnection denotes
* a pair of tensors in the graph, between which an edge will be created
* in the callable.
*
*
* repeated .tensorflow.TensorConnection tensor_connection = 5;
*/
public org.tensorflow.framework.TensorConnection.Builder addTensorConnectionBuilder(
int index) {
return getTensorConnectionFieldBuilder().addBuilder(
index, org.tensorflow.framework.TensorConnection.getDefaultInstance());
}
/**
*
* Tensors to be connected in the callable. Each TensorConnection denotes
* a pair of tensors in the graph, between which an edge will be created
* in the callable.
*
*
* repeated .tensorflow.TensorConnection tensor_connection = 5;
*/
public java.util.List<org.tensorflow.framework.TensorConnection.Builder>
getTensorConnectionBuilderList() {
return getTensorConnectionFieldBuilder().getBuilderList();
}
private com.google.protobuf.RepeatedFieldBuilderV3<
org.tensorflow.framework.TensorConnection, org.tensorflow.framework.TensorConnection.Builder, org.tensorflow.framework.TensorConnectionOrBuilder>
getTensorConnectionFieldBuilder() {
if (tensorConnectionBuilder_ == null) {
tensorConnectionBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3<
org.tensorflow.framework.TensorConnection, org.tensorflow.framework.TensorConnection.Builder, org.tensorflow.framework.TensorConnectionOrBuilder>(
tensorConnection_,
((bitField0_ & 0x00000010) == 0x00000010),
getParentForChildren(),
isClean());
tensorConnection_ = null;
}
return tensorConnectionBuilder_;
}
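// Illustrative usage (not part of the generated class): a minimal sketch of
// wiring two graph tensors together via a TensorConnection. It assumes the
// TensorConnection message generated from config.proto exposes setFromTensor
// and setToTensor for its from_tensor / to_tensor string fields.
//
//   org.tensorflow.framework.CallableOptions opts =
//       org.tensorflow.framework.CallableOptions.newBuilder()
//           .addTensorConnection(org.tensorflow.framework.TensorConnection.newBuilder()
//               .setFromTensor("out:0")   // value produced at this tensor ...
//               .setToTensor("in:0"))     // ... is routed into this tensor
//           .build();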
private com.google.protobuf.MapField<
java.lang.String, java.lang.String> feedDevices_;
private com.google.protobuf.MapField<java.lang.String, java.lang.String>
internalGetFeedDevices() {
if (feedDevices_ == null) {
return com.google.protobuf.MapField.emptyMapField(
FeedDevicesDefaultEntryHolder.defaultEntry);
}
return feedDevices_;
}
private com.google.protobuf.MapField<java.lang.String, java.lang.String>
internalGetMutableFeedDevices() {
onChanged();
if (feedDevices_ == null) {
feedDevices_ = com.google.protobuf.MapField.newMapField(
FeedDevicesDefaultEntryHolder.defaultEntry);
}
if (!feedDevices_.isMutable()) {
feedDevices_ = feedDevices_.copy();
}
return feedDevices_;
}
public int getFeedDevicesCount() {
return internalGetFeedDevices().getMap().size();
}
/**
*
* The Tensor objects fed in the callable and fetched from the callable
* are expected to be backed by host (CPU) memory by default.
* The options below allow changing that - feeding tensors backed by
* device memory, or returning tensors that are backed by device memory.
* The maps below map the name of a feed/fetch tensor (which appears in
* 'feed' or 'fetch' fields above), to the fully qualified name of the device
* owning the memory backing the contents of the tensor.
* For example, creating a callable with the following options:
* CallableOptions {
* feed: "a:0"
* feed: "b:0"
* fetch: "x:0"
* fetch: "y:0"
* feed_devices: {
* "a:0": "/job:localhost/replica:0/task:0/device:GPU:0"
* }
* fetch_devices: {
* "y:0": "/job:localhost/replica:0/task:0/device:GPU:0"
* }
* }
* means that the Callable expects:
* - The first argument ("a:0") is a Tensor backed by GPU memory.
* - The second argument ("b:0") is a Tensor backed by host memory.
* and of its return values:
* - The first output ("x:0") will be backed by host memory.
* - The second output ("y:0") will be backed by GPU memory.
* FEEDS:
* It is the responsibility of the caller to ensure that the memory of the fed
* tensors will be correctly initialized and synchronized before it is
* accessed by operations executed during the call to Session::RunCallable().
* This is typically ensured by using the TensorFlow memory allocators
* (Device::GetAllocator()) to create the Tensor to be fed.
* Alternatively, for CUDA-enabled GPU devices, this typically means that the
* operation that produced the contents of the tensor has completed, i.e., the
* CUDA stream has been synchronized (e.g., via cuCtxSynchronize() or
* cuStreamSynchronize()).
*
*
* map<string, string> feed_devices = 6;
*/
public boolean containsFeedDevices(
java.lang.String key) {
if (key == null) { throw new java.lang.NullPointerException(); }
return internalGetFeedDevices().getMap().containsKey(key);
}
/**
* Use {@link #getFeedDevicesMap()} instead.
*/
@java.lang.Deprecated
public java.util.Map<java.lang.String, java.lang.String> getFeedDevices() {
return getFeedDevicesMap();
}
/**
*
* The Tensor objects fed in the callable and fetched from the callable
* are expected to be backed by host (CPU) memory by default.
* The options below allow changing that - feeding tensors backed by
* device memory, or returning tensors that are backed by device memory.
* The maps below map the name of a feed/fetch tensor (which appears in
* 'feed' or 'fetch' fields above), to the fully qualified name of the device
* owning the memory backing the contents of the tensor.
* For example, creating a callable with the following options:
* CallableOptions {
* feed: "a:0"
* feed: "b:0"
* fetch: "x:0"
* fetch: "y:0"
* feed_devices: {
* "a:0": "/job:localhost/replica:0/task:0/device:GPU:0"
* }
* fetch_devices: {
* "y:0": "/job:localhost/replica:0/task:0/device:GPU:0"
* }
* }
* means that the Callable expects:
* - The first argument ("a:0") is a Tensor backed by GPU memory.
* - The second argument ("b:0") is a Tensor backed by host memory.
* and of its return values:
* - The first output ("x:0") will be backed by host memory.
* - The second output ("y:0") will be backed by GPU memory.
* FEEDS:
* It is the responsibility of the caller to ensure that the memory of the fed
* tensors will be correctly initialized and synchronized before it is
* accessed by operations executed during the call to Session::RunCallable().
* This is typically ensured by using the TensorFlow memory allocators
* (Device::GetAllocator()) to create the Tensor to be fed.
* Alternatively, for CUDA-enabled GPU devices, this typically means that the
* operation that produced the contents of the tensor has completed, i.e., the
* CUDA stream has been synchronized (e.g., via cuCtxSynchronize() or
* cuStreamSynchronize()).
*
*
* map<string, string> feed_devices = 6;
*/
public java.util.Map<java.lang.String, java.lang.String> getFeedDevicesMap() {
return internalGetFeedDevices().getMap();
}
/**
*
* The Tensor objects fed in the callable and fetched from the callable
* are expected to be backed by host (CPU) memory by default.
* The options below allow changing that - feeding tensors backed by
* device memory, or returning tensors that are backed by device memory.
* The maps below map the name of a feed/fetch tensor (which appears in
* 'feed' or 'fetch' fields above), to the fully qualified name of the device
* owning the memory backing the contents of the tensor.
* For example, creating a callable with the following options:
* CallableOptions {
* feed: "a:0"
* feed: "b:0"
* fetch: "x:0"
* fetch: "y:0"
* feed_devices: {
* "a:0": "/job:localhost/replica:0/task:0/device:GPU:0"
* }
* fetch_devices: {
* "y:0": "/job:localhost/replica:0/task:0/device:GPU:0"
* }
* }
* means that the Callable expects:
* - The first argument ("a:0") is a Tensor backed by GPU memory.
* - The second argument ("b:0") is a Tensor backed by host memory.
* and of its return values:
* - The first output ("x:0") will be backed by host memory.
* - The second output ("y:0") will be backed by GPU memory.
* FEEDS:
* It is the responsibility of the caller to ensure that the memory of the fed
* tensors will be correctly initialized and synchronized before it is
* accessed by operations executed during the call to Session::RunCallable().
* This is typically ensured by using the TensorFlow memory allocators
* (Device::GetAllocator()) to create the Tensor to be fed.
* Alternatively, for CUDA-enabled GPU devices, this typically means that the
* operation that produced the contents of the tensor has completed, i.e., the
* CUDA stream has been synchronized (e.g., via cuCtxSynchronize() or
* cuStreamSynchronize()).
*
*
* map<string, string> feed_devices = 6;
*/
public java.lang.String getFeedDevicesOrDefault(
java.lang.String key,
java.lang.String defaultValue) {
if (key == null) { throw new java.lang.NullPointerException(); }
java.util.Map<java.lang.String, java.lang.String> map =
internalGetFeedDevices().getMap();
return map.containsKey(key) ? map.get(key) : defaultValue;
}
/**
*
* The Tensor objects fed in the callable and fetched from the callable
* are expected to be backed by host (CPU) memory by default.
* The options below allow changing that - feeding tensors backed by
* device memory, or returning tensors that are backed by device memory.
* The maps below map the name of a feed/fetch tensor (which appears in
* 'feed' or 'fetch' fields above), to the fully qualified name of the device
* owning the memory backing the contents of the tensor.
* For example, creating a callable with the following options:
* CallableOptions {
* feed: "a:0"
* feed: "b:0"
* fetch: "x:0"
* fetch: "y:0"
* feed_devices: {
* "a:0": "/job:localhost/replica:0/task:0/device:GPU:0"
* }
* fetch_devices: {
* "y:0": "/job:localhost/replica:0/task:0/device:GPU:0"
* }
* }
* means that the Callable expects:
* - The first argument ("a:0") is a Tensor backed by GPU memory.
* - The second argument ("b:0") is a Tensor backed by host memory.
* and of its return values:
* - The first output ("x:0") will be backed by host memory.
* - The second output ("y:0") will be backed by GPU memory.
* FEEDS:
* It is the responsibility of the caller to ensure that the memory of the fed
* tensors will be correctly initialized and synchronized before it is
* accessed by operations executed during the call to Session::RunCallable().
* This is typically ensured by using the TensorFlow memory allocators
* (Device::GetAllocator()) to create the Tensor to be fed.
* Alternatively, for CUDA-enabled GPU devices, this typically means that the
* operation that produced the contents of the tensor has completed, i.e., the
* CUDA stream has been synchronized (e.g., via cuCtxSynchronize() or
* cuStreamSynchronize()).
*
*
* map<string, string> feed_devices = 6;
*/
public java.lang.String getFeedDevicesOrThrow(
java.lang.String key) {
if (key == null) { throw new java.lang.NullPointerException(); }
java.util.Map<java.lang.String, java.lang.String> map =
internalGetFeedDevices().getMap();
if (!map.containsKey(key)) {
throw new java.lang.IllegalArgumentException();
}
return map.get(key);
}
public Builder clearFeedDevices() {
internalGetMutableFeedDevices().getMutableMap()
.clear();
return this;
}
/**
*
* The Tensor objects fed in the callable and fetched from the callable
* are expected to be backed by host (CPU) memory by default.
* The options below allow changing that - feeding tensors backed by
* device memory, or returning tensors that are backed by device memory.
* The maps below map the name of a feed/fetch tensor (which appears in
* 'feed' or 'fetch' fields above), to the fully qualified name of the device
* owning the memory backing the contents of the tensor.
* For example, creating a callable with the following options:
* CallableOptions {
* feed: "a:0"
* feed: "b:0"
* fetch: "x:0"
* fetch: "y:0"
* feed_devices: {
* "a:0": "/job:localhost/replica:0/task:0/device:GPU:0"
* }
* fetch_devices: {
* "y:0": "/job:localhost/replica:0/task:0/device:GPU:0"
* }
* }
* means that the Callable expects:
* - The first argument ("a:0") is a Tensor backed by GPU memory.
* - The second argument ("b:0") is a Tensor backed by host memory.
* and of its return values:
* - The first output ("x:0") will be backed by host memory.
* - The second output ("y:0") will be backed by GPU memory.
* FEEDS:
* It is the responsibility of the caller to ensure that the memory of the fed
* tensors will be correctly initialized and synchronized before it is
* accessed by operations executed during the call to Session::RunCallable().
* This is typically ensured by using the TensorFlow memory allocators
* (Device::GetAllocator()) to create the Tensor to be fed.
* Alternatively, for CUDA-enabled GPU devices, this typically means that the
* operation that produced the contents of the tensor has completed, i.e., the
* CUDA stream has been synchronized (e.g., via cuCtxSynchronize() or
* cuStreamSynchronize()).
*
*
* map<string, string> feed_devices = 6;
*/
public Builder removeFeedDevices(
java.lang.String key) {
if (key == null) { throw new java.lang.NullPointerException(); }
internalGetMutableFeedDevices().getMutableMap()
.remove(key);
return this;
}
/**
* Use alternate mutation accessors instead.
*/
@java.lang.Deprecated
public java.util.Map<java.lang.String, java.lang.String>
getMutableFeedDevices() {
return internalGetMutableFeedDevices().getMutableMap();
}
/**
*
* The Tensor objects fed in the callable and fetched from the callable
* are expected to be backed by host (CPU) memory by default.
* The options below allow changing that - feeding tensors backed by
* device memory, or returning tensors that are backed by device memory.
* The maps below map the name of a feed/fetch tensor (which appears in
* 'feed' or 'fetch' fields above), to the fully qualified name of the device
* owning the memory backing the contents of the tensor.
* For example, creating a callable with the following options:
* CallableOptions {
* feed: "a:0"
* feed: "b:0"
* fetch: "x:0"
* fetch: "y:0"
* feed_devices: {
* "a:0": "/job:localhost/replica:0/task:0/device:GPU:0"
* }
* fetch_devices: {
* "y:0": "/job:localhost/replica:0/task:0/device:GPU:0"
* }
* }
* means that the Callable expects:
* - The first argument ("a:0") is a Tensor backed by GPU memory.
* - The second argument ("b:0") is a Tensor backed by host memory.
* and of its return values:
* - The first output ("x:0") will be backed by host memory.
* - The second output ("y:0") will be backed by GPU memory.
* FEEDS:
* It is the responsibility of the caller to ensure that the memory of the fed
* tensors will be correctly initialized and synchronized before it is
* accessed by operations executed during the call to Session::RunCallable().
* This is typically ensured by using the TensorFlow memory allocators
* (Device::GetAllocator()) to create the Tensor to be fed.
* Alternatively, for CUDA-enabled GPU devices, this typically means that the
* operation that produced the contents of the tensor has completed, i.e., the
* CUDA stream has been synchronized (e.g., via cuCtxSynchronize() or
* cuStreamSynchronize()).
*
*
* map<string, string> feed_devices = 6;
*/
public Builder putFeedDevices(
java.lang.String key,
java.lang.String value) {
if (key == null) { throw new java.lang.NullPointerException(); }
if (value == null) { throw new java.lang.NullPointerException(); }
internalGetMutableFeedDevices().getMutableMap()
.put(key, value);
return this;
}
/**
*
* The Tensor objects fed in the callable and fetched from the callable
* are expected to be backed by host (CPU) memory by default.
* The options below allow changing that - feeding tensors backed by
* device memory, or returning tensors that are backed by device memory.
* The maps below map the name of a feed/fetch tensor (which appears in
* 'feed' or 'fetch' fields above), to the fully qualified name of the device
* owning the memory backing the contents of the tensor.
* For example, creating a callable with the following options:
* CallableOptions {
* feed: "a:0"
* feed: "b:0"
* fetch: "x:0"
* fetch: "y:0"
* feed_devices: {
* "a:0": "/job:localhost/replica:0/task:0/device:GPU:0"
* }
* fetch_devices: {
* "y:0": "/job:localhost/replica:0/task:0/device:GPU:0"
* }
* }
* means that the Callable expects:
* - The first argument ("a:0") is a Tensor backed by GPU memory.
* - The second argument ("b:0") is a Tensor backed by host memory.
* and of its return values:
* - The first output ("x:0") will be backed by host memory.
* - The second output ("y:0") will be backed by GPU memory.
* FEEDS:
* It is the responsibility of the caller to ensure that the memory of the fed
* tensors will be correctly initialized and synchronized before it is
* accessed by operations executed during the call to Session::RunCallable().
* This is typically ensured by using the TensorFlow memory allocators
* (Device::GetAllocator()) to create the Tensor to be fed.
* Alternatively, for CUDA-enabled GPU devices, this typically means that the
* operation that produced the contents of the tensor has completed, i.e., the
* CUDA stream has been synchronized (e.g., via cuCtxSynchronize() or
* cuStreamSynchronize()).
*
*
* map<string, string> feed_devices = 6;
*/
public Builder putAllFeedDevices(
java.util.Map<java.lang.String, java.lang.String> values) {
internalGetMutableFeedDevices().getMutableMap()
.putAll(values);
return this;
}
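// Illustrative usage (not part of the generated class): a minimal sketch that
// mirrors the worked example in the comment above, declaring that feed "a:0"
// and fetch "y:0" live in GPU memory while "b:0" and "x:0" stay on the host.
// The device string is the fully qualified device name from that example.
//
//   org.tensorflow.framework.CallableOptions opts =
//       org.tensorflow.framework.CallableOptions.newBuilder()
//           .addFeed("a:0").addFeed("b:0")
//           .addFetch("x:0").addFetch("y:0")
//           .putFeedDevices("a:0", "/job:localhost/replica:0/task:0/device:GPU:0")
//           .putFetchDevices("y:0", "/job:localhost/replica:0/task:0/device:GPU:0")
//           .build();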
private com.google.protobuf.MapField<
java.lang.String, java.lang.String> fetchDevices_;
private com.google.protobuf.MapField<java.lang.String, java.lang.String>
internalGetFetchDevices() {
if (fetchDevices_ == null) {
return com.google.protobuf.MapField.emptyMapField(
FetchDevicesDefaultEntryHolder.defaultEntry);
}
return fetchDevices_;
}
private com.google.protobuf.MapField<java.lang.String, java.lang.String>
internalGetMutableFetchDevices() {
onChanged();
if (fetchDevices_ == null) {
fetchDevices_ = com.google.protobuf.MapField.newMapField(
FetchDevicesDefaultEntryHolder.defaultEntry);
}
if (!fetchDevices_.isMutable()) {
fetchDevices_ = fetchDevices_.copy();
}
return fetchDevices_;
}
public int getFetchDevicesCount() {
return internalGetFetchDevices().getMap().size();
}
/**
* map<string, string> fetch_devices = 7;
*/
public boolean containsFetchDevices(
java.lang.String key) {
if (key == null) { throw new java.lang.NullPointerException(); }
return internalGetFetchDevices().getMap().containsKey(key);
}
/**
* Use {@link #getFetchDevicesMap()} instead.
*/
@java.lang.Deprecated
public java.util.Map<java.lang.String, java.lang.String> getFetchDevices() {
return getFetchDevicesMap();
}
/**
* map<string, string> fetch_devices = 7;
*/
public java.util.Map<java.lang.String, java.lang.String> getFetchDevicesMap() {
return internalGetFetchDevices().getMap();
}
/**
* map<string, string> fetch_devices = 7;
*/
public java.lang.String getFetchDevicesOrDefault(
java.lang.String key,
java.lang.String defaultValue) {
if (key == null) { throw new java.lang.NullPointerException(); }
java.util.Map<java.lang.String, java.lang.String> map =
internalGetFetchDevices().getMap();
return map.containsKey(key) ? map.get(key) : defaultValue;
}
/**
* map<string, string> fetch_devices = 7;
*/
public java.lang.String getFetchDevicesOrThrow(
java.lang.String key) {
if (key == null) { throw new java.lang.NullPointerException(); }
java.util.Map<java.lang.String, java.lang.String> map =
internalGetFetchDevices().getMap();
if (!map.containsKey(key)) {
throw new java.lang.IllegalArgumentException();
}
return map.get(key);
}
public Builder clearFetchDevices() {
internalGetMutableFetchDevices().getMutableMap()
.clear();
return this;
}
/**
* map<string, string> fetch_devices = 7;
*/
public Builder removeFetchDevices(
java.lang.String key) {
if (key == null) { throw new java.lang.NullPointerException(); }
internalGetMutableFetchDevices().getMutableMap()
.remove(key);
return this;
}
/**
* Use alternate mutation accessors instead.
*/
@java.lang.Deprecated
public java.util.Map<java.lang.String, java.lang.String>
getMutableFetchDevices() {
return internalGetMutableFetchDevices().getMutableMap();
}
/**
* map<string, string> fetch_devices = 7;
*/
public Builder putFetchDevices(
java.lang.String key,
java.lang.String value) {
if (key == null) { throw new java.lang.NullPointerException(); }
if (value == null) { throw new java.lang.NullPointerException(); }
internalGetMutableFetchDevices().getMutableMap()
.put(key, value);
return this;
}
/**
* map<string, string> fetch_devices = 7;
*/
public Builder putAllFetchDevices(
java.util.Map<java.lang.String, java.lang.String> values) {
internalGetMutableFetchDevices().getMutableMap()
.putAll(values);
return this;
}
private boolean fetchSkipSync_ ;
/**
*
* By default, RunCallable() will synchronize the GPU stream before returning
* fetched tensors on a GPU device, to ensure that the values in those tensors
* have been produced. This simplifies interacting with the tensors, but
* potentially incurs a performance hit.
* If this option is set to true, the caller is responsible for ensuring
* that the values in the fetched tensors have been produced before they are
* used. The caller can do this by invoking `Device::Sync()` on the underlying
* device(s), or by feeding the tensors back to the same Session using
* `feed_devices` with the same corresponding device name.
*
*
* bool fetch_skip_sync = 8;
*/
public boolean getFetchSkipSync() {
return fetchSkipSync_;
}
/**
*
* By default, RunCallable() will synchronize the GPU stream before returning
* fetched tensors on a GPU device, to ensure that the values in those tensors
* have been produced. This simplifies interacting with the tensors, but
* potentially incurs a performance hit.
* If this option is set to true, the caller is responsible for ensuring
* that the values in the fetched tensors have been produced before they are
* used. The caller can do this by invoking `Device::Sync()` on the underlying
* device(s), or by feeding the tensors back to the same Session using
* `feed_devices` with the same corresponding device name.
*
*
* bool fetch_skip_sync = 8;
*/
public Builder setFetchSkipSync(boolean value) {
fetchSkipSync_ = value;
onChanged();
return this;
}
/**
*
* By default, RunCallable() will synchronize the GPU stream before returning
* fetched tensors on a GPU device, to ensure that the values in those tensors
* have been produced. This simplifies interacting with the tensors, but
* potentially incurs a performance hit.
* If this option is set to true, the caller is responsible for ensuring
* that the values in the fetched tensors have been produced before they are
* used. The caller can do this by invoking `Device::Sync()` on the underlying
* device(s), or by feeding the tensors back to the same Session using
* `feed_devices` with the same corresponding device name.
*
*
* bool fetch_skip_sync = 8;
*/
public Builder clearFetchSkipSync() {
fetchSkipSync_ = false;
onChanged();
return this;
}
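// Illustrative usage (not part of the generated class): a minimal sketch of
// opting out of the post-run GPU stream synchronization. The caller then owns
// the synchronization, e.g. via Device::Sync() on the C++ side, as described
// in the comment above.
//
//   org.tensorflow.framework.CallableOptions opts =
//       org.tensorflow.framework.CallableOptions.newBuilder()
//           .addFetch("y:0")
//           .setFetchSkipSync(true)
//           .build();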
public final Builder setUnknownFields(
final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFieldsProto3(unknownFields);
}
public final Builder mergeUnknownFields(
final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:tensorflow.CallableOptions)
}
// @@protoc_insertion_point(class_scope:tensorflow.CallableOptions)
private static final org.tensorflow.framework.CallableOptions DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.tensorflow.framework.CallableOptions();
}
public static org.tensorflow.framework.CallableOptions getDefaultInstance() {
return DEFAULT_INSTANCE;
}
private static final com.google.protobuf.Parser<CallableOptions>
PARSER = new com.google.protobuf.AbstractParser<CallableOptions>() {
public CallableOptions parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new CallableOptions(input, extensionRegistry);
}
};
public static com.google.protobuf.Parser<CallableOptions> parser() {
return PARSER;
}
@java.lang.Override
public com.google.protobuf.Parser<CallableOptions> getParserForType() {
return PARSER;
}
public org.tensorflow.framework.CallableOptions getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
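// Illustrative usage (not part of the generated class): a minimal sketch of
// round-tripping a CallableOptions message through its binary wire format
// using the standard GeneratedMessageV3 serialization entry points.
//
//   byte[] wire = org.tensorflow.framework.CallableOptions.newBuilder()
//       .addFeed("a:0")
//       .addFetch("x:0")
//       .build()
//       .toByteArray();
//   // parseFrom throws com.google.protobuf.InvalidProtocolBufferException
//   org.tensorflow.framework.CallableOptions parsed =
//       org.tensorflow.framework.CallableOptions.parseFrom(wire);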