// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: tensorflow/core/protobuf/config.proto
package org.tensorflow.framework;
/**
*
* Session configuration parameters.
* The system picks appropriate values for fields that are not set.
*
*
* Protobuf type {@code tensorflow.ConfigProto}
*/
public final class ConfigProto extends
com.google.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:tensorflow.ConfigProto)
ConfigProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use ConfigProto.newBuilder() to construct.
private ConfigProto(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private ConfigProto() {
intraOpParallelismThreads_ = 0;
interOpParallelismThreads_ = 0;
usePerSessionThreads_ = false;
sessionInterOpThreadPool_ = java.util.Collections.emptyList();
placementPeriod_ = 0;
deviceFilters_ = com.google.protobuf.LazyStringArrayList.EMPTY;
allowSoftPlacement_ = false;
logDevicePlacement_ = false;
operationTimeoutInMs_ = 0L;
isolateSessionState_ = false;
}
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private ConfigProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 10: {
if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
deviceCount_ = com.google.protobuf.MapField.newMapField(
DeviceCountDefaultEntryHolder.defaultEntry);
mutable_bitField0_ |= 0x00000001;
}
com.google.protobuf.MapEntry<java.lang.String, java.lang.Integer>
deviceCount__ = input.readMessage(
DeviceCountDefaultEntryHolder.defaultEntry.getParserForType(), extensionRegistry);
deviceCount_.getMutableMap().put(
deviceCount__.getKey(), deviceCount__.getValue());
break;
}
case 16: {
intraOpParallelismThreads_ = input.readInt32();
break;
}
case 24: {
placementPeriod_ = input.readInt32();
break;
}
case 34: {
java.lang.String s = input.readStringRequireUtf8();
if (!((mutable_bitField0_ & 0x00000040) == 0x00000040)) {
deviceFilters_ = new com.google.protobuf.LazyStringArrayList();
mutable_bitField0_ |= 0x00000040;
}
deviceFilters_.add(s);
break;
}
case 40: {
interOpParallelismThreads_ = input.readInt32();
break;
}
case 50: {
org.tensorflow.framework.GPUOptions.Builder subBuilder = null;
if (gpuOptions_ != null) {
subBuilder = gpuOptions_.toBuilder();
}
gpuOptions_ = input.readMessage(org.tensorflow.framework.GPUOptions.parser(), extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(gpuOptions_);
gpuOptions_ = subBuilder.buildPartial();
}
break;
}
case 56: {
allowSoftPlacement_ = input.readBool();
break;
}
case 64: {
logDevicePlacement_ = input.readBool();
break;
}
case 72: {
usePerSessionThreads_ = input.readBool();
break;
}
case 82: {
org.tensorflow.framework.GraphOptions.Builder subBuilder = null;
if (graphOptions_ != null) {
subBuilder = graphOptions_.toBuilder();
}
graphOptions_ = input.readMessage(org.tensorflow.framework.GraphOptions.parser(), extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(graphOptions_);
graphOptions_ = subBuilder.buildPartial();
}
break;
}
case 88: {
operationTimeoutInMs_ = input.readInt64();
break;
}
case 98: {
if (!((mutable_bitField0_ & 0x00000010) == 0x00000010)) {
sessionInterOpThreadPool_ = new java.util.ArrayList<org.tensorflow.framework.ThreadPoolOptionProto>();
mutable_bitField0_ |= 0x00000010;
}
sessionInterOpThreadPool_.add(
input.readMessage(org.tensorflow.framework.ThreadPoolOptionProto.parser(), extensionRegistry));
break;
}
case 106: {
org.tensorflow.framework.RPCOptions.Builder subBuilder = null;
if (rpcOptions_ != null) {
subBuilder = rpcOptions_.toBuilder();
}
rpcOptions_ = input.readMessage(org.tensorflow.framework.RPCOptions.parser(), extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(rpcOptions_);
rpcOptions_ = subBuilder.buildPartial();
}
break;
}
case 114: {
org.tensorflow.distruntime.ClusterDef.Builder subBuilder = null;
if (clusterDef_ != null) {
subBuilder = clusterDef_.toBuilder();
}
clusterDef_ = input.readMessage(org.tensorflow.distruntime.ClusterDef.parser(), extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(clusterDef_);
clusterDef_ = subBuilder.buildPartial();
}
break;
}
case 120: {
isolateSessionState_ = input.readBool();
break;
}
case 130: {
org.tensorflow.framework.ConfigProto.Experimental.Builder subBuilder = null;
if (experimental_ != null) {
subBuilder = experimental_.toBuilder();
}
experimental_ = input.readMessage(org.tensorflow.framework.ConfigProto.Experimental.parser(), extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(experimental_);
experimental_ = subBuilder.buildPartial();
}
break;
}
default: {
if (!parseUnknownFieldProto3(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
if (((mutable_bitField0_ & 0x00000040) == 0x00000040)) {
deviceFilters_ = deviceFilters_.getUnmodifiableView();
}
if (((mutable_bitField0_ & 0x00000010) == 0x00000010)) {
sessionInterOpThreadPool_ = java.util.Collections.unmodifiableList(sessionInterOpThreadPool_);
}
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_ConfigProto_descriptor;
}
@SuppressWarnings({"rawtypes"})
@java.lang.Override
protected com.google.protobuf.MapField internalGetMapField(
int number) {
switch (number) {
case 1:
return internalGetDeviceCount();
default:
throw new RuntimeException(
"Invalid map field number: " + number);
}
}
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_ConfigProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.tensorflow.framework.ConfigProto.class, org.tensorflow.framework.ConfigProto.Builder.class);
}
public interface ExperimentalOrBuilder extends
// @@protoc_insertion_point(interface_extends:tensorflow.ConfigProto.Experimental)
com.google.protobuf.MessageOrBuilder {
/**
*
* Task name for group resolution.
*
*
* string collective_group_leader = 1;
*/
java.lang.String getCollectiveGroupLeader();
/**
*
* Task name for group resolution.
*
*
* string collective_group_leader = 1;
*/
com.google.protobuf.ByteString
getCollectiveGroupLeaderBytes();
/**
*
* Which executor to use, the default executor will be used
* if it is an empty string or "DEFAULT"
*
*
* string executor_type = 3;
*/
java.lang.String getExecutorType();
/**
*
* Which executor to use, the default executor will be used
* if it is an empty string or "DEFAULT"
*
*
* string executor_type = 3;
*/
com.google.protobuf.ByteString
getExecutorTypeBytes();
}
/**
*
* Everything inside Experimental is subject to change and is not subject
* to API stability guarantees in
* https://www.tensorflow.org/guide/version_compat.
*
*
* Protobuf type {@code tensorflow.ConfigProto.Experimental}
*/
public static final class Experimental extends
com.google.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:tensorflow.ConfigProto.Experimental)
ExperimentalOrBuilder {
private static final long serialVersionUID = 0L;
// Use Experimental.newBuilder() to construct.
private Experimental(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private Experimental() {
collectiveGroupLeader_ = "";
executorType_ = "";
}
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private Experimental(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 10: {
java.lang.String s = input.readStringRequireUtf8();
collectiveGroupLeader_ = s;
break;
}
case 26: {
java.lang.String s = input.readStringRequireUtf8();
executorType_ = s;
break;
}
default: {
if (!parseUnknownFieldProto3(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_ConfigProto_Experimental_descriptor;
}
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_ConfigProto_Experimental_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.tensorflow.framework.ConfigProto.Experimental.class, org.tensorflow.framework.ConfigProto.Experimental.Builder.class);
}
public static final int COLLECTIVE_GROUP_LEADER_FIELD_NUMBER = 1;
private volatile java.lang.Object collectiveGroupLeader_;
/**
*
* Task name for group resolution.
*
*
* string collective_group_leader = 1;
*/
public java.lang.String getCollectiveGroupLeader() {
java.lang.Object ref = collectiveGroupLeader_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
collectiveGroupLeader_ = s;
return s;
}
}
/**
*
* Task name for group resolution.
*
*
* string collective_group_leader = 1;
*/
public com.google.protobuf.ByteString
getCollectiveGroupLeaderBytes() {
java.lang.Object ref = collectiveGroupLeader_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
collectiveGroupLeader_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
public static final int EXECUTOR_TYPE_FIELD_NUMBER = 3;
private volatile java.lang.Object executorType_;
/**
*
* Which executor to use, the default executor will be used
* if it is an empty string or "DEFAULT"
*
*
* string executor_type = 3;
*/
public java.lang.String getExecutorType() {
java.lang.Object ref = executorType_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
executorType_ = s;
return s;
}
}
/**
*
* Which executor to use, the default executor will be used
* if it is an empty string or "DEFAULT"
*
*
* string executor_type = 3;
*/
public com.google.protobuf.ByteString
getExecutorTypeBytes() {
java.lang.Object ref = executorType_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
executorType_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (!getCollectiveGroupLeaderBytes().isEmpty()) {
com.google.protobuf.GeneratedMessageV3.writeString(output, 1, collectiveGroupLeader_);
}
if (!getExecutorTypeBytes().isEmpty()) {
com.google.protobuf.GeneratedMessageV3.writeString(output, 3, executorType_);
}
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (!getCollectiveGroupLeaderBytes().isEmpty()) {
size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, collectiveGroupLeader_);
}
if (!getExecutorTypeBytes().isEmpty()) {
size += com.google.protobuf.GeneratedMessageV3.computeStringSize(3, executorType_);
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.tensorflow.framework.ConfigProto.Experimental)) {
return super.equals(obj);
}
org.tensorflow.framework.ConfigProto.Experimental other = (org.tensorflow.framework.ConfigProto.Experimental) obj;
boolean result = true;
result = result && getCollectiveGroupLeader()
.equals(other.getCollectiveGroupLeader());
result = result && getExecutorType()
.equals(other.getExecutorType());
result = result && unknownFields.equals(other.unknownFields);
return result;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
hash = (37 * hash) + COLLECTIVE_GROUP_LEADER_FIELD_NUMBER;
hash = (53 * hash) + getCollectiveGroupLeader().hashCode();
hash = (37 * hash) + EXECUTOR_TYPE_FIELD_NUMBER;
hash = (53 * hash) + getExecutorType().hashCode();
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.tensorflow.framework.ConfigProto.Experimental parseFrom(
java.nio.ByteBuffer data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.tensorflow.framework.ConfigProto.Experimental parseFrom(
java.nio.ByteBuffer data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.tensorflow.framework.ConfigProto.Experimental parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.tensorflow.framework.ConfigProto.Experimental parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.tensorflow.framework.ConfigProto.Experimental parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.tensorflow.framework.ConfigProto.Experimental parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.tensorflow.framework.ConfigProto.Experimental parseFrom(java.io.InputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.tensorflow.framework.ConfigProto.Experimental parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.tensorflow.framework.ConfigProto.Experimental parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.tensorflow.framework.ConfigProto.Experimental parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.tensorflow.framework.ConfigProto.Experimental parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.tensorflow.framework.ConfigProto.Experimental parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.tensorflow.framework.ConfigProto.Experimental prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
*
* Everything inside Experimental is subject to change and is not subject
* to API stability guarantees in
* https://www.tensorflow.org/guide/version_compat.
*
*
* Protobuf type {@code tensorflow.ConfigProto.Experimental}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:tensorflow.ConfigProto.Experimental)
org.tensorflow.framework.ConfigProto.ExperimentalOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_ConfigProto_Experimental_descriptor;
}
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_ConfigProto_Experimental_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.tensorflow.framework.ConfigProto.Experimental.class, org.tensorflow.framework.ConfigProto.Experimental.Builder.class);
}
// Construct using org.tensorflow.framework.ConfigProto.Experimental.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
}
}
@java.lang.Override
public Builder clear() {
super.clear();
collectiveGroupLeader_ = "";
executorType_ = "";
return this;
}
@java.lang.Override
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_ConfigProto_Experimental_descriptor;
}
@java.lang.Override
public org.tensorflow.framework.ConfigProto.Experimental getDefaultInstanceForType() {
return org.tensorflow.framework.ConfigProto.Experimental.getDefaultInstance();
}
@java.lang.Override
public org.tensorflow.framework.ConfigProto.Experimental build() {
org.tensorflow.framework.ConfigProto.Experimental result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.tensorflow.framework.ConfigProto.Experimental buildPartial() {
org.tensorflow.framework.ConfigProto.Experimental result = new org.tensorflow.framework.ConfigProto.Experimental(this);
result.collectiveGroupLeader_ = collectiveGroupLeader_;
result.executorType_ = executorType_;
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return (Builder) super.clone();
}
@java.lang.Override
public Builder setField(
com.google.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return (Builder) super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
com.google.protobuf.Descriptors.FieldDescriptor field) {
return (Builder) super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
com.google.protobuf.Descriptors.OneofDescriptor oneof) {
return (Builder) super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
com.google.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return (Builder) super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
com.google.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return (Builder) super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.tensorflow.framework.ConfigProto.Experimental) {
return mergeFrom((org.tensorflow.framework.ConfigProto.Experimental)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.tensorflow.framework.ConfigProto.Experimental other) {
if (other == org.tensorflow.framework.ConfigProto.Experimental.getDefaultInstance()) return this;
if (!other.getCollectiveGroupLeader().isEmpty()) {
collectiveGroupLeader_ = other.collectiveGroupLeader_;
onChanged();
}
if (!other.getExecutorType().isEmpty()) {
executorType_ = other.executorType_;
onChanged();
}
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
return true;
}
@java.lang.Override
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.tensorflow.framework.ConfigProto.Experimental parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.tensorflow.framework.ConfigProto.Experimental) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private java.lang.Object collectiveGroupLeader_ = "";
/**
*
* Task name for group resolution.
*
*
* string collective_group_leader = 1;
*/
public java.lang.String getCollectiveGroupLeader() {
java.lang.Object ref = collectiveGroupLeader_;
if (!(ref instanceof java.lang.String)) {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
collectiveGroupLeader_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
*
* Task name for group resolution.
*
*
* string collective_group_leader = 1;
*/
public com.google.protobuf.ByteString
getCollectiveGroupLeaderBytes() {
java.lang.Object ref = collectiveGroupLeader_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
collectiveGroupLeader_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
*
* Task name for group resolution.
*
*
* string collective_group_leader = 1;
*/
public Builder setCollectiveGroupLeader(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
collectiveGroupLeader_ = value;
onChanged();
return this;
}
/**
*
* Task name for group resolution.
*
*
* string collective_group_leader = 1;
*/
public Builder clearCollectiveGroupLeader() {
collectiveGroupLeader_ = getDefaultInstance().getCollectiveGroupLeader();
onChanged();
return this;
}
/**
*
* Task name for group resolution.
*
*
* string collective_group_leader = 1;
*/
public Builder setCollectiveGroupLeaderBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
checkByteStringIsUtf8(value);
collectiveGroupLeader_ = value;
onChanged();
return this;
}
private java.lang.Object executorType_ = "";
/**
*
* Which executor to use, the default executor will be used
* if it is an empty string or "DEFAULT"
*
*
* string executor_type = 3;
*/
public java.lang.String getExecutorType() {
java.lang.Object ref = executorType_;
if (!(ref instanceof java.lang.String)) {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
executorType_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
*
* Which executor to use, the default executor will be used
* if it is an empty string or "DEFAULT"
*
*
* string executor_type = 3;
*/
public com.google.protobuf.ByteString
getExecutorTypeBytes() {
java.lang.Object ref = executorType_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
executorType_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
*
* Which executor to use, the default executor will be used
* if it is an empty string or "DEFAULT"
*
*
* string executor_type = 3;
*/
public Builder setExecutorType(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
executorType_ = value;
onChanged();
return this;
}
/**
*
* Which executor to use, the default executor will be used
* if it is an empty string or "DEFAULT"
*
*
* string executor_type = 3;
*/
public Builder clearExecutorType() {
executorType_ = getDefaultInstance().getExecutorType();
onChanged();
return this;
}
/**
*
* Which executor to use, the default executor will be used
* if it is an empty string or "DEFAULT"
*
*
* string executor_type = 3;
*/
public Builder setExecutorTypeBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
checkByteStringIsUtf8(value);
executorType_ = value;
onChanged();
return this;
}
@java.lang.Override
public final Builder setUnknownFields(
final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFieldsProto3(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:tensorflow.ConfigProto.Experimental)
}
// @@protoc_insertion_point(class_scope:tensorflow.ConfigProto.Experimental)
private static final org.tensorflow.framework.ConfigProto.Experimental DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.tensorflow.framework.ConfigProto.Experimental();
}
public static org.tensorflow.framework.ConfigProto.Experimental getDefaultInstance() {
return DEFAULT_INSTANCE;
}
private static final com.google.protobuf.Parser<Experimental>
PARSER = new com.google.protobuf.AbstractParser<Experimental>() {
@java.lang.Override
public Experimental parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new Experimental(input, extensionRegistry);
}
};
public static com.google.protobuf.Parser<Experimental> parser() {
return PARSER;
}
@java.lang.Override
public com.google.protobuf.Parser<Experimental> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.tensorflow.framework.ConfigProto.Experimental getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
private int bitField0_;
public static final int DEVICE_COUNT_FIELD_NUMBER = 1;
private static final class DeviceCountDefaultEntryHolder {
static final com.google.protobuf.MapEntry<
java.lang.String, java.lang.Integer> defaultEntry =
com.google.protobuf.MapEntry
.<java.lang.String, java.lang.Integer>newDefaultInstance(
org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_ConfigProto_DeviceCountEntry_descriptor,
com.google.protobuf.WireFormat.FieldType.STRING,
"",
com.google.protobuf.WireFormat.FieldType.INT32,
0);
}
private com.google.protobuf.MapField<
java.lang.String, java.lang.Integer> deviceCount_;
private com.google.protobuf.MapField<java.lang.String, java.lang.Integer>
internalGetDeviceCount() {
if (deviceCount_ == null) {
return com.google.protobuf.MapField.emptyMapField(
DeviceCountDefaultEntryHolder.defaultEntry);
}
return deviceCount_;
}
public int getDeviceCountCount() {
return internalGetDeviceCount().getMap().size();
}
/**
*
* Map from device type name (e.g., "CPU" or "GPU" ) to maximum
* number of devices of that type to use. If a particular device
* type is not found in the map, the system picks an appropriate
* number.
*
*
* map<string, int32> device_count = 1;
*/
public boolean containsDeviceCount(
java.lang.String key) {
if (key == null) { throw new java.lang.NullPointerException(); }
return internalGetDeviceCount().getMap().containsKey(key);
}
/**
* Use {@link #getDeviceCountMap()} instead.
*/
@java.lang.Deprecated
public java.util.Map<java.lang.String, java.lang.Integer> getDeviceCount() {
return getDeviceCountMap();
}
/**
*
* Map from device type name (e.g., "CPU" or "GPU" ) to maximum
* number of devices of that type to use. If a particular device
* type is not found in the map, the system picks an appropriate
* number.
*
*
* map<string, int32> device_count = 1;
*/
public java.util.Map<java.lang.String, java.lang.Integer> getDeviceCountMap() {
return internalGetDeviceCount().getMap();
}
/**
*
* Map from device type name (e.g., "CPU" or "GPU" ) to maximum
* number of devices of that type to use. If a particular device
* type is not found in the map, the system picks an appropriate
* number.
*
*
* map<string, int32> device_count = 1;
*/
public int getDeviceCountOrDefault(
java.lang.String key,
int defaultValue) {
if (key == null) { throw new java.lang.NullPointerException(); }
java.util.Map<java.lang.String, java.lang.Integer> map =
internalGetDeviceCount().getMap();
return map.containsKey(key) ? map.get(key) : defaultValue;
}
/**
*
* Map from device type name (e.g., "CPU" or "GPU" ) to maximum
* number of devices of that type to use. If a particular device
* type is not found in the map, the system picks an appropriate
* number.
*
*
* map<string, int32> device_count = 1;
*/
public int getDeviceCountOrThrow(
java.lang.String key) {
if (key == null) { throw new java.lang.NullPointerException(); }
java.util.Map<java.lang.String, java.lang.Integer> map =
internalGetDeviceCount().getMap();
if (!map.containsKey(key)) {
throw new java.lang.IllegalArgumentException();
}
return map.get(key);
}
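// Usage sketch (not part of the generated source; putDeviceCount is the map
// mutator protoc normally emits on the Builder and is assumed here): capping the
// session at one GPU and reading the value back through the getter above.
//
//   org.tensorflow.framework.ConfigProto config =
//       org.tensorflow.framework.ConfigProto.newBuilder()
//           .putDeviceCount("GPU", 1)
//           .build();
//   int gpus = config.getDeviceCountOrDefault("GPU", 0);  // 1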
public static final int INTRA_OP_PARALLELISM_THREADS_FIELD_NUMBER = 2;
private int intraOpParallelismThreads_;
/**
*
* The execution of an individual op (for some op types) can be
* parallelized on a pool of intra_op_parallelism_threads.
* 0 means the system picks an appropriate number.
*
*
* int32 intra_op_parallelism_threads = 2;
*/
public int getIntraOpParallelismThreads() {
return intraOpParallelismThreads_;
}
public static final int INTER_OP_PARALLELISM_THREADS_FIELD_NUMBER = 5;
private int interOpParallelismThreads_;
/**
*
* Nodes that perform blocking operations are enqueued on a pool of
* inter_op_parallelism_threads available in each process.
* 0 means the system picks an appropriate number.
* Note that the first Session created in the process sets the
* number of threads for all future sessions unless use_per_session_threads is
* true or session_inter_op_thread_pool is configured.
*
*
* int32 inter_op_parallelism_threads = 5;
*/
public int getInterOpParallelismThreads() {
return interOpParallelismThreads_;
}
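// Usage sketch (not part of the generated source; assumes the standard
// setIntraOpParallelismThreads/setInterOpParallelismThreads setters protoc emits
// for these int32 fields): pinning both thread pool sizes instead of letting the
// system pick them.
//
//   org.tensorflow.framework.ConfigProto config =
//       org.tensorflow.framework.ConfigProto.newBuilder()
//           .setIntraOpParallelismThreads(4)
//           .setInterOpParallelismThreads(2)
//           .build();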
public static final int USE_PER_SESSION_THREADS_FIELD_NUMBER = 9;
private boolean usePerSessionThreads_;
/**
*
* If true, use a new set of threads for this session rather than the global
* pool of threads. Only supported by direct sessions.
* If false, use the global threads created by the first session, or the
* per-session thread pools configured by session_inter_op_thread_pool.
* This option is deprecated. The same effect can be achieved by setting
* session_inter_op_thread_pool to have one element, whose num_threads equals
* inter_op_parallelism_threads.
*
*
* bool use_per_session_threads = 9;
*/
public boolean getUsePerSessionThreads() {
return usePerSessionThreads_;
}
public static final int SESSION_INTER_OP_THREAD_POOL_FIELD_NUMBER = 12;
private java.util.List<org.tensorflow.framework.ThreadPoolOptionProto> sessionInterOpThreadPool_;
/**
*
* This option is experimental - it may be replaced with a different mechanism
* in the future.
* Configures session thread pools. If this is configured, then RunOptions for
* a Run call can select the thread pool to use.
* The intended use is for when some session invocations need to run in a
* background pool limited to a small number of threads:
* - For example, a session may be configured to have one large pool (for
* regular compute) and one small pool (for periodic, low priority work);
* using the small pool is currently the mechanism for limiting the inter-op
* parallelism of the low priority work. Note that it does not limit the
* parallelism of work spawned by a single op kernel implementation.
* - Using this setting is normally not needed in training, but may help some
* serving use cases.
* - It is also generally recommended to set the global_name field of this
* proto, to avoid creating multiple large pools. It is typically better to
* run the non-low-priority work, even across sessions, in a single large
* pool.
*
*
* repeated .tensorflow.ThreadPoolOptionProto session_inter_op_thread_pool = 12;
*/
public java.util.List<org.tensorflow.framework.ThreadPoolOptionProto> getSessionInterOpThreadPoolList() {
return sessionInterOpThreadPool_;
}
/**
*
* This option is experimental - it may be replaced with a different mechanism
* in the future.
* Configures session thread pools. If this is configured, then RunOptions for
* a Run call can select the thread pool to use.
* The intended use is for when some session invocations need to run in a
* background pool limited to a small number of threads:
* - For example, a session may be configured to have one large pool (for
* regular compute) and one small pool (for periodic, low priority work);
* using the small pool is currently the mechanism for limiting the inter-op
* parallelism of the low priority work. Note that it does not limit the
* parallelism of work spawned by a single op kernel implementation.
* - Using this setting is normally not needed in training, but may help some
* serving use cases.
* - It is also generally recommended to set the global_name field of this
* proto, to avoid creating multiple large pools. It is typically better to
* run the non-low-priority work, even across sessions, in a single large
* pool.
*
*
* repeated .tensorflow.ThreadPoolOptionProto session_inter_op_thread_pool = 12;
*/
public java.util.List<? extends org.tensorflow.framework.ThreadPoolOptionProtoOrBuilder>
getSessionInterOpThreadPoolOrBuilderList() {
return sessionInterOpThreadPool_;
}
/**
*
* This option is experimental - it may be replaced with a different mechanism
* in the future.
* Configures session thread pools. If this is configured, then RunOptions for
* a Run call can select the thread pool to use.
* The intended use is for when some session invocations need to run in a
* background pool limited to a small number of threads:
* - For example, a session may be configured to have one large pool (for
* regular compute) and one small pool (for periodic, low priority work);
* using the small pool is currently the mechanism for limiting the inter-op
* parallelism of the low priority work. Note that it does not limit the
* parallelism of work spawned by a single op kernel implementation.
* - Using this setting is normally not needed in training, but may help some
* serving use cases.
* - It is also generally recommended to set the global_name field of this
* proto, to avoid creating multiple large pools. It is typically better to
* run the non-low-priority work, even across sessions, in a single large
* pool.
*
*
* repeated .tensorflow.ThreadPoolOptionProto session_inter_op_thread_pool = 12;
*/
public int getSessionInterOpThreadPoolCount() {
return sessionInterOpThreadPool_.size();
}
/**
*
* This option is experimental - it may be replaced with a different mechanism
* in the future.
* Configures session thread pools. If this is configured, then RunOptions for
* a Run call can select the thread pool to use.
* The intended use is for when some session invocations need to run in a
* background pool limited to a small number of threads:
* - For example, a session may be configured to have one large pool (for
* regular compute) and one small pool (for periodic, low priority work);
* using the small pool is currently the mechanism for limiting the inter-op
* parallelism of the low priority work. Note that it does not limit the
* parallelism of work spawned by a single op kernel implementation.
* - Using this setting is normally not needed in training, but may help some
* serving use cases.
* - It is also generally recommended to set the global_name field of this
* proto, to avoid creating multiple large pools. It is typically better to
* run the non-low-priority work, even across sessions, in a single large
* pool.
*
*
* repeated .tensorflow.ThreadPoolOptionProto session_inter_op_thread_pool = 12;
*/
public org.tensorflow.framework.ThreadPoolOptionProto getSessionInterOpThreadPool(int index) {
return sessionInterOpThreadPool_.get(index);
}
/**
*
* This option is experimental - it may be replaced with a different mechanism
* in the future.
* Configures session thread pools. If this is configured, then RunOptions for
* a Run call can select the thread pool to use.
* The intended use is for when some session invocations need to run in a
* background pool limited to a small number of threads:
* - For example, a session may be configured to have one large pool (for
* regular compute) and one small pool (for periodic, low priority work);
* using the small pool is currently the mechanism for limiting the inter-op
* parallelism of the low priority work. Note that it does not limit the
* parallelism of work spawned by a single op kernel implementation.
* - Using this setting is normally not needed in training, but may help some
* serving use cases.
* - It is also generally recommended to set the global_name field of this
* proto, to avoid creating multiple large pools. It is typically better to
* run the non-low-priority work, even across sessions, in a single large
* pool.
*
*
* repeated .tensorflow.ThreadPoolOptionProto session_inter_op_thread_pool = 12;
*/
public org.tensorflow.framework.ThreadPoolOptionProtoOrBuilder getSessionInterOpThreadPoolOrBuilder(
int index) {
return sessionInterOpThreadPool_.get(index);
}
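// Usage sketch (hedged; assumes ThreadPoolOptionProto exposes the generated
// setNumThreads/setGlobalName setters and that the Builder accepts sub-builders
// via addSessionInterOpThreadPool): one large pool for regular compute plus a
// small named pool for low-priority work, selectable later through RunOptions.
//
//   org.tensorflow.framework.ConfigProto config =
//       org.tensorflow.framework.ConfigProto.newBuilder()
//           .addSessionInterOpThreadPool(
//               org.tensorflow.framework.ThreadPoolOptionProto.newBuilder()
//                   .setNumThreads(8))
//           .addSessionInterOpThreadPool(
//               org.tensorflow.framework.ThreadPoolOptionProto.newBuilder()
//                   .setNumThreads(1)
//                   .setGlobalName("low_priority"))
//           .build();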
public static final int PLACEMENT_PERIOD_FIELD_NUMBER = 3;
private int placementPeriod_;
/**
*
* Assignment of Nodes to Devices is recomputed every placement_period
* steps until the system warms up (at which point the recomputation
* typically slows down automatically).
*
*
* int32 placement_period = 3;
*/
public int getPlacementPeriod() {
return placementPeriod_;
}
public static final int DEVICE_FILTERS_FIELD_NUMBER = 4;
private com.google.protobuf.LazyStringList deviceFilters_;
/**
*
* When any filters are present sessions will ignore all devices which do not
* match the filters. Each filter can be partially specified, e.g. "/job:ps"
* "/job:worker/replica:3", etc.
*
*
* repeated string device_filters = 4;
*/
public com.google.protobuf.ProtocolStringList
getDeviceFiltersList() {
return deviceFilters_;
}
/**
*
* When any filters are present sessions will ignore all devices which do not
* match the filters. Each filter can be partially specified, e.g. "/job:ps"
* "/job:worker/replica:3", etc.
*
*
* repeated string device_filters = 4;
*/
public int getDeviceFiltersCount() {
return deviceFilters_.size();
}
/**
*
* When any filters are present sessions will ignore all devices which do not
* match the filters. Each filter can be partially specified, e.g. "/job:ps"
* "/job:worker/replica:3", etc.
*
*
* repeated string device_filters = 4;
*/
public java.lang.String getDeviceFilters(int index) {
return deviceFilters_.get(index);
}
/**
*
* When any filters are present sessions will ignore all devices which do not
* match the filters. Each filter can be partially specified, e.g. "/job:ps"
* "/job:worker/replica:3", etc.
*
*
* repeated string device_filters = 4;
*/
public com.google.protobuf.ByteString
getDeviceFiltersBytes(int index) {
return deviceFilters_.getByteString(index);
}
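// Usage sketch (hedged; addDeviceFilters is the repeated-string adder protoc
// normally emits and is assumed here): restricting a session to parameter-server
// devices and one worker replica using partially specified filters.
//
//   org.tensorflow.framework.ConfigProto config =
//       org.tensorflow.framework.ConfigProto.newBuilder()
//           .addDeviceFilters("/job:ps")
//           .addDeviceFilters("/job:worker/replica:3")
//           .build();
//   java.util.List<java.lang.String> filters = config.getDeviceFiltersList();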
public static final int GPU_OPTIONS_FIELD_NUMBER = 6;
private org.tensorflow.framework.GPUOptions gpuOptions_;
/**
*
* Options that apply to all GPUs.
*
*
* .tensorflow.GPUOptions gpu_options = 6;
*/
public boolean hasGpuOptions() {
return gpuOptions_ != null;
}
/**
*
* Options that apply to all GPUs.
*
*
* .tensorflow.GPUOptions gpu_options = 6;
*/
public org.tensorflow.framework.GPUOptions getGpuOptions() {
return gpuOptions_ == null ? org.tensorflow.framework.GPUOptions.getDefaultInstance() : gpuOptions_;
}
/**
*
* Options that apply to all GPUs.
*
*
* .tensorflow.GPUOptions gpu_options = 6;
*/
public org.tensorflow.framework.GPUOptionsOrBuilder getGpuOptionsOrBuilder() {
return getGpuOptions();
}
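// Usage sketch (hedged; assumes GPUOptions carries the generated setAllowGrowth
// setter for its allow_growth field): letting GPU memory grow on demand instead
// of being reserved up front.
//
//   org.tensorflow.framework.ConfigProto config =
//       org.tensorflow.framework.ConfigProto.newBuilder()
//           .setGpuOptions(
//               org.tensorflow.framework.GPUOptions.newBuilder()
//                   .setAllowGrowth(true))
//           .build();
//   boolean set = config.hasGpuOptions();  // true once the submessage is present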
public static final int ALLOW_SOFT_PLACEMENT_FIELD_NUMBER = 7;
private boolean allowSoftPlacement_;
/**
*
* Whether soft placement is allowed. If allow_soft_placement is true,
* an op will be placed on CPU if
* 1. there's no GPU implementation for the OP
* or
* 2. no GPU devices are known or registered
* or
* 3. need to co-locate with reftype input(s) which are from CPU.
*
*
* bool allow_soft_placement = 7;
*/
public boolean getAllowSoftPlacement() {
return allowSoftPlacement_;
}
public static final int LOG_DEVICE_PLACEMENT_FIELD_NUMBER = 8;
private boolean logDevicePlacement_;
/**
*
* Whether device placements should be logged.
*
*
* bool log_device_placement = 8;
*/
public boolean getLogDevicePlacement() {
return logDevicePlacement_;
}
public static final int GRAPH_OPTIONS_FIELD_NUMBER = 10;
private org.tensorflow.framework.GraphOptions graphOptions_;
/**
*
* Options that apply to all graphs.
*
*
* .tensorflow.GraphOptions graph_options = 10;
*/
public boolean hasGraphOptions() {
return graphOptions_ != null;
}
/**
*
* Options that apply to all graphs.
*
*
* .tensorflow.GraphOptions graph_options = 10;
*/
public org.tensorflow.framework.GraphOptions getGraphOptions() {
return graphOptions_ == null ? org.tensorflow.framework.GraphOptions.getDefaultInstance() : graphOptions_;
}
/**
*
* Options that apply to all graphs.
*
*
* .tensorflow.GraphOptions graph_options = 10;
*/
public org.tensorflow.framework.GraphOptionsOrBuilder getGraphOptionsOrBuilder() {
return getGraphOptions();
}
public static final int OPERATION_TIMEOUT_IN_MS_FIELD_NUMBER = 11;
private long operationTimeoutInMs_;
/**
*
* Global timeout for all blocking operations in this session. If non-zero,
* and not overridden on a per-operation basis, this value will be used as the
* deadline for all blocking operations.
*
*
* int64 operation_timeout_in_ms = 11;
*/
public long getOperationTimeoutInMs() {
return operationTimeoutInMs_;
}
public static final int RPC_OPTIONS_FIELD_NUMBER = 13;
private org.tensorflow.framework.RPCOptions rpcOptions_;
/**
*
* Options that apply when this session uses the distributed runtime.
*
*
* .tensorflow.RPCOptions rpc_options = 13;
*/
public boolean hasRpcOptions() {
return rpcOptions_ != null;
}
/**
*
* Options that apply when this session uses the distributed runtime.
*
*
* .tensorflow.RPCOptions rpc_options = 13;
*/
public org.tensorflow.framework.RPCOptions getRpcOptions() {
return rpcOptions_ == null ? org.tensorflow.framework.RPCOptions.getDefaultInstance() : rpcOptions_;
}
/**
*
* Options that apply when this session uses the distributed runtime.
*
*
* .tensorflow.RPCOptions rpc_options = 13;
*/
public org.tensorflow.framework.RPCOptionsOrBuilder getRpcOptionsOrBuilder() {
return getRpcOptions();
}
public static final int CLUSTER_DEF_FIELD_NUMBER = 14;
private org.tensorflow.distruntime.ClusterDef clusterDef_;
/**
*
* Optional list of all workers to use in this session.
*
*
* .tensorflow.ClusterDef cluster_def = 14;
*/
public boolean hasClusterDef() {
return clusterDef_ != null;
}
/**
*
* Optional list of all workers to use in this session.
*
*
* .tensorflow.ClusterDef cluster_def = 14;
*/
public org.tensorflow.distruntime.ClusterDef getClusterDef() {
return clusterDef_ == null ? org.tensorflow.distruntime.ClusterDef.getDefaultInstance() : clusterDef_;
}
/**
*
* Optional list of all workers to use in this session.
*
*
* .tensorflow.ClusterDef cluster_def = 14;
*/
public org.tensorflow.distruntime.ClusterDefOrBuilder getClusterDefOrBuilder() {
return getClusterDef();
}
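// Usage sketch (hedged; JobDef and its setName/putTasks methods live in the
// companion org.tensorflow.distruntime package and are assumed here): declaring a
// single-worker cluster for this session.
//
//   org.tensorflow.framework.ConfigProto config =
//       org.tensorflow.framework.ConfigProto.newBuilder()
//           .setClusterDef(
//               org.tensorflow.distruntime.ClusterDef.newBuilder()
//                   .addJob(
//                       org.tensorflow.distruntime.JobDef.newBuilder()
//                           .setName("worker")
//                           .putTasks(0, "localhost:2222")))
//           .build();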
public static final int ISOLATE_SESSION_STATE_FIELD_NUMBER = 15;
private boolean isolateSessionState_;
/**
*
* If true, any resources such as Variables used in the session will not be
* shared with other sessions.
*
*
* bool isolate_session_state = 15;
*/
public boolean getIsolateSessionState() {
return isolateSessionState_;
}
public static final int EXPERIMENTAL_FIELD_NUMBER = 16;
private org.tensorflow.framework.ConfigProto.Experimental experimental_;
/**
* .tensorflow.ConfigProto.Experimental experimental = 16;
*/
public boolean hasExperimental() {
return experimental_ != null;
}
/**
* .tensorflow.ConfigProto.Experimental experimental = 16;
*/
public org.tensorflow.framework.ConfigProto.Experimental getExperimental() {
return experimental_ == null ? org.tensorflow.framework.ConfigProto.Experimental.getDefaultInstance() : experimental_;
}
/**
* .tensorflow.ConfigProto.Experimental experimental = 16;
*/
public org.tensorflow.framework.ConfigProto.ExperimentalOrBuilder getExperimentalOrBuilder() {
return getExperimental();
}
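// Usage sketch (hedged; "MY_EXECUTOR" is a placeholder, not a known executor
// name): routing the session to a non-default executor through the nested
// Experimental message. Leaving executor_type empty or "DEFAULT" keeps the
// default executor.
//
//   org.tensorflow.framework.ConfigProto config =
//       org.tensorflow.framework.ConfigProto.newBuilder()
//           .setExperimental(
//               org.tensorflow.framework.ConfigProto.Experimental.newBuilder()
//                   .setExecutorType("MY_EXECUTOR"))
//           .build();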
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
com.google.protobuf.GeneratedMessageV3
.serializeStringMapTo(
output,
internalGetDeviceCount(),
DeviceCountDefaultEntryHolder.defaultEntry,
1);
if (intraOpParallelismThreads_ != 0) {
output.writeInt32(2, intraOpParallelismThreads_);
}
if (placementPeriod_ != 0) {
output.writeInt32(3, placementPeriod_);
}
for (int i = 0; i < deviceFilters_.size(); i++) {
com.google.protobuf.GeneratedMessageV3.writeString(output, 4, deviceFilters_.getRaw(i));
}
if (interOpParallelismThreads_ != 0) {
output.writeInt32(5, interOpParallelismThreads_);
}
if (gpuOptions_ != null) {
output.writeMessage(6, getGpuOptions());
}
if (allowSoftPlacement_ != false) {
output.writeBool(7, allowSoftPlacement_);
}
if (logDevicePlacement_ != false) {
output.writeBool(8, logDevicePlacement_);
}
if (usePerSessionThreads_ != false) {
output.writeBool(9, usePerSessionThreads_);
}
if (graphOptions_ != null) {
output.writeMessage(10, getGraphOptions());
}
if (operationTimeoutInMs_ != 0L) {
output.writeInt64(11, operationTimeoutInMs_);
}
for (int i = 0; i < sessionInterOpThreadPool_.size(); i++) {
output.writeMessage(12, sessionInterOpThreadPool_.get(i));
}
if (rpcOptions_ != null) {
output.writeMessage(13, getRpcOptions());
}
if (clusterDef_ != null) {
output.writeMessage(14, getClusterDef());
}
if (isolateSessionState_ != false) {
output.writeBool(15, isolateSessionState_);
}
if (experimental_ != null) {
output.writeMessage(16, getExperimental());
}
unknownFields.writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
for (java.util.Map.Entry<java.lang.String, java.lang.Integer> entry
: internalGetDeviceCount().getMap().entrySet()) {
com.google.protobuf.MapEntry<java.lang.String, java.lang.Integer>
deviceCount__ = DeviceCountDefaultEntryHolder.defaultEntry.newBuilderForType()
.setKey(entry.getKey())
.setValue(entry.getValue())
.build();
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, deviceCount__);
}
if (intraOpParallelismThreads_ != 0) {
size += com.google.protobuf.CodedOutputStream
.computeInt32Size(2, intraOpParallelismThreads_);
}
if (placementPeriod_ != 0) {
size += com.google.protobuf.CodedOutputStream
.computeInt32Size(3, placementPeriod_);
}
{
int dataSize = 0;
for (int i = 0; i < deviceFilters_.size(); i++) {
dataSize += computeStringSizeNoTag(deviceFilters_.getRaw(i));
}
size += dataSize;
size += 1 * getDeviceFiltersList().size();
}
if (interOpParallelismThreads_ != 0) {
size += com.google.protobuf.CodedOutputStream
.computeInt32Size(5, interOpParallelismThreads_);
}
if (gpuOptions_ != null) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(6, getGpuOptions());
}
if (allowSoftPlacement_ != false) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(7, allowSoftPlacement_);
}
if (logDevicePlacement_ != false) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(8, logDevicePlacement_);
}
if (usePerSessionThreads_ != false) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(9, usePerSessionThreads_);
}
if (graphOptions_ != null) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(10, getGraphOptions());
}
if (operationTimeoutInMs_ != 0L) {
size += com.google.protobuf.CodedOutputStream
.computeInt64Size(11, operationTimeoutInMs_);
}
for (int i = 0; i < sessionInterOpThreadPool_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(12, sessionInterOpThreadPool_.get(i));
}
if (rpcOptions_ != null) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(13, getRpcOptions());
}
if (clusterDef_ != null) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(14, getClusterDef());
}
if (isolateSessionState_ != false) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(15, isolateSessionState_);
}
if (experimental_ != null) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(16, getExperimental());
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.tensorflow.framework.ConfigProto)) {
return super.equals(obj);
}
org.tensorflow.framework.ConfigProto other = (org.tensorflow.framework.ConfigProto) obj;
boolean result = true;
result = result && internalGetDeviceCount().equals(
other.internalGetDeviceCount());
result = result && (getIntraOpParallelismThreads()
== other.getIntraOpParallelismThreads());
result = result && (getInterOpParallelismThreads()
== other.getInterOpParallelismThreads());
result = result && (getUsePerSessionThreads()
== other.getUsePerSessionThreads());
result = result && getSessionInterOpThreadPoolList()
.equals(other.getSessionInterOpThreadPoolList());
result = result && (getPlacementPeriod()
== other.getPlacementPeriod());
result = result && getDeviceFiltersList()
.equals(other.getDeviceFiltersList());
result = result && (hasGpuOptions() == other.hasGpuOptions());
if (hasGpuOptions()) {
result = result && getGpuOptions()
.equals(other.getGpuOptions());
}
result = result && (getAllowSoftPlacement()
== other.getAllowSoftPlacement());
result = result && (getLogDevicePlacement()
== other.getLogDevicePlacement());
result = result && (hasGraphOptions() == other.hasGraphOptions());
if (hasGraphOptions()) {
result = result && getGraphOptions()
.equals(other.getGraphOptions());
}
result = result && (getOperationTimeoutInMs()
== other.getOperationTimeoutInMs());
result = result && (hasRpcOptions() == other.hasRpcOptions());
if (hasRpcOptions()) {
result = result && getRpcOptions()
.equals(other.getRpcOptions());
}
result = result && (hasClusterDef() == other.hasClusterDef());
if (hasClusterDef()) {
result = result && getClusterDef()
.equals(other.getClusterDef());
}
result = result && (getIsolateSessionState()
== other.getIsolateSessionState());
result = result && (hasExperimental() == other.hasExperimental());
if (hasExperimental()) {
result = result && getExperimental()
.equals(other.getExperimental());
}
result = result && unknownFields.equals(other.unknownFields);
return result;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (!internalGetDeviceCount().getMap().isEmpty()) {
hash = (37 * hash) + DEVICE_COUNT_FIELD_NUMBER;
hash = (53 * hash) + internalGetDeviceCount().hashCode();
}
hash = (37 * hash) + INTRA_OP_PARALLELISM_THREADS_FIELD_NUMBER;
hash = (53 * hash) + getIntraOpParallelismThreads();
hash = (37 * hash) + INTER_OP_PARALLELISM_THREADS_FIELD_NUMBER;
hash = (53 * hash) + getInterOpParallelismThreads();
hash = (37 * hash) + USE_PER_SESSION_THREADS_FIELD_NUMBER;
hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(
getUsePerSessionThreads());
if (getSessionInterOpThreadPoolCount() > 0) {
hash = (37 * hash) + SESSION_INTER_OP_THREAD_POOL_FIELD_NUMBER;
hash = (53 * hash) + getSessionInterOpThreadPoolList().hashCode();
}
hash = (37 * hash) + PLACEMENT_PERIOD_FIELD_NUMBER;
hash = (53 * hash) + getPlacementPeriod();
if (getDeviceFiltersCount() > 0) {
hash = (37 * hash) + DEVICE_FILTERS_FIELD_NUMBER;
hash = (53 * hash) + getDeviceFiltersList().hashCode();
}
if (hasGpuOptions()) {
hash = (37 * hash) + GPU_OPTIONS_FIELD_NUMBER;
hash = (53 * hash) + getGpuOptions().hashCode();
}
hash = (37 * hash) + ALLOW_SOFT_PLACEMENT_FIELD_NUMBER;
hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(
getAllowSoftPlacement());
hash = (37 * hash) + LOG_DEVICE_PLACEMENT_FIELD_NUMBER;
hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(
getLogDevicePlacement());
if (hasGraphOptions()) {
hash = (37 * hash) + GRAPH_OPTIONS_FIELD_NUMBER;
hash = (53 * hash) + getGraphOptions().hashCode();
}
hash = (37 * hash) + OPERATION_TIMEOUT_IN_MS_FIELD_NUMBER;
hash = (53 * hash) + com.google.protobuf.Internal.hashLong(
getOperationTimeoutInMs());
if (hasRpcOptions()) {
hash = (37 * hash) + RPC_OPTIONS_FIELD_NUMBER;
hash = (53 * hash) + getRpcOptions().hashCode();
}
if (hasClusterDef()) {
hash = (37 * hash) + CLUSTER_DEF_FIELD_NUMBER;
hash = (53 * hash) + getClusterDef().hashCode();
}
hash = (37 * hash) + ISOLATE_SESSION_STATE_FIELD_NUMBER;
hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(
getIsolateSessionState());
if (hasExperimental()) {
hash = (37 * hash) + EXPERIMENTAL_FIELD_NUMBER;
hash = (53 * hash) + getExperimental().hashCode();
}
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.tensorflow.framework.ConfigProto parseFrom(
java.nio.ByteBuffer data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.tensorflow.framework.ConfigProto parseFrom(
java.nio.ByteBuffer data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.tensorflow.framework.ConfigProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.tensorflow.framework.ConfigProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.tensorflow.framework.ConfigProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.tensorflow.framework.ConfigProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.tensorflow.framework.ConfigProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.tensorflow.framework.ConfigProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static org.tensorflow.framework.ConfigProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static org.tensorflow.framework.ConfigProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static org.tensorflow.framework.ConfigProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static org.tensorflow.framework.ConfigProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
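// Illustrative round trip (not part of the generated file): any of the parseFrom
// overloads above can rebuild a ConfigProto from its serialized form. Variable
// names are arbitrary; toByteArray() comes from the protobuf message base class.
//
//   org.tensorflow.framework.ConfigProto original =
//       org.tensorflow.framework.ConfigProto.newBuilder()
//           .setAllowSoftPlacement(true)
//           .build();
//   byte[] wire = original.toByteArray();
//   org.tensorflow.framework.ConfigProto parsed =
//       org.tensorflow.framework.ConfigProto.parseFrom(wire);
//   // parsed.equals(original) holds for a lossless round trip.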
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(org.tensorflow.framework.ConfigProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
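// Sketch of deriving a changed copy (illustrative): ConfigProto instances are
// immutable, so toBuilder() or newBuilder(prototype) is used to modify one.
//
//   org.tensorflow.framework.ConfigProto tweaked = existingConfig.toBuilder()
//       .setLogDevicePlacement(true)
//       .build();
//   // existingConfig is assumed to be a previously built ConfigProto; it is left unchanged.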
/**
*
* Session configuration parameters.
* The system picks appropriate values for fields that are not set.
*
*
* Protobuf type {@code tensorflow.ConfigProto}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
// @@protoc_insertion_point(builder_implements:tensorflow.ConfigProto)
org.tensorflow.framework.ConfigProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_ConfigProto_descriptor;
}
@SuppressWarnings({"rawtypes"})
protected com.google.protobuf.MapField internalGetMapField(
int number) {
switch (number) {
case 1:
return internalGetDeviceCount();
default:
throw new RuntimeException(
"Invalid map field number: " + number);
}
}
@SuppressWarnings({"rawtypes"})
protected com.google.protobuf.MapField internalGetMutableMapField(
int number) {
switch (number) {
case 1:
return internalGetMutableDeviceCount();
default:
throw new RuntimeException(
"Invalid map field number: " + number);
}
}
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_ConfigProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.tensorflow.framework.ConfigProto.class, org.tensorflow.framework.ConfigProto.Builder.class);
}
// Construct using org.tensorflow.framework.ConfigProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
getSessionInterOpThreadPoolFieldBuilder();
}
}
@java.lang.Override
public Builder clear() {
super.clear();
internalGetMutableDeviceCount().clear();
intraOpParallelismThreads_ = 0;
interOpParallelismThreads_ = 0;
usePerSessionThreads_ = false;
if (sessionInterOpThreadPoolBuilder_ == null) {
sessionInterOpThreadPool_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000010);
} else {
sessionInterOpThreadPoolBuilder_.clear();
}
placementPeriod_ = 0;
deviceFilters_ = com.google.protobuf.LazyStringArrayList.EMPTY;
bitField0_ = (bitField0_ & ~0x00000040);
if (gpuOptionsBuilder_ == null) {
gpuOptions_ = null;
} else {
gpuOptions_ = null;
gpuOptionsBuilder_ = null;
}
allowSoftPlacement_ = false;
logDevicePlacement_ = false;
if (graphOptionsBuilder_ == null) {
graphOptions_ = null;
} else {
graphOptions_ = null;
graphOptionsBuilder_ = null;
}
operationTimeoutInMs_ = 0L;
if (rpcOptionsBuilder_ == null) {
rpcOptions_ = null;
} else {
rpcOptions_ = null;
rpcOptionsBuilder_ = null;
}
if (clusterDefBuilder_ == null) {
clusterDef_ = null;
} else {
clusterDef_ = null;
clusterDefBuilder_ = null;
}
isolateSessionState_ = false;
if (experimentalBuilder_ == null) {
experimental_ = null;
} else {
experimental_ = null;
experimentalBuilder_ = null;
}
return this;
}
@java.lang.Override
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.tensorflow.framework.ConfigProtos.internal_static_tensorflow_ConfigProto_descriptor;
}
@java.lang.Override
public org.tensorflow.framework.ConfigProto getDefaultInstanceForType() {
return org.tensorflow.framework.ConfigProto.getDefaultInstance();
}
@java.lang.Override
public org.tensorflow.framework.ConfigProto build() {
org.tensorflow.framework.ConfigProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public org.tensorflow.framework.ConfigProto buildPartial() {
org.tensorflow.framework.ConfigProto result = new org.tensorflow.framework.ConfigProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
result.deviceCount_ = internalGetDeviceCount();
result.deviceCount_.makeImmutable();
result.intraOpParallelismThreads_ = intraOpParallelismThreads_;
result.interOpParallelismThreads_ = interOpParallelismThreads_;
result.usePerSessionThreads_ = usePerSessionThreads_;
if (sessionInterOpThreadPoolBuilder_ == null) {
if (((bitField0_ & 0x00000010) == 0x00000010)) {
sessionInterOpThreadPool_ = java.util.Collections.unmodifiableList(sessionInterOpThreadPool_);
bitField0_ = (bitField0_ & ~0x00000010);
}
result.sessionInterOpThreadPool_ = sessionInterOpThreadPool_;
} else {
result.sessionInterOpThreadPool_ = sessionInterOpThreadPoolBuilder_.build();
}
result.placementPeriod_ = placementPeriod_;
if (((bitField0_ & 0x00000040) == 0x00000040)) {
deviceFilters_ = deviceFilters_.getUnmodifiableView();
bitField0_ = (bitField0_ & ~0x00000040);
}
result.deviceFilters_ = deviceFilters_;
if (gpuOptionsBuilder_ == null) {
result.gpuOptions_ = gpuOptions_;
} else {
result.gpuOptions_ = gpuOptionsBuilder_.build();
}
result.allowSoftPlacement_ = allowSoftPlacement_;
result.logDevicePlacement_ = logDevicePlacement_;
if (graphOptionsBuilder_ == null) {
result.graphOptions_ = graphOptions_;
} else {
result.graphOptions_ = graphOptionsBuilder_.build();
}
result.operationTimeoutInMs_ = operationTimeoutInMs_;
if (rpcOptionsBuilder_ == null) {
result.rpcOptions_ = rpcOptions_;
} else {
result.rpcOptions_ = rpcOptionsBuilder_.build();
}
if (clusterDefBuilder_ == null) {
result.clusterDef_ = clusterDef_;
} else {
result.clusterDef_ = clusterDefBuilder_.build();
}
result.isolateSessionState_ = isolateSessionState_;
if (experimentalBuilder_ == null) {
result.experimental_ = experimental_;
} else {
result.experimental_ = experimentalBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
@java.lang.Override
public Builder clone() {
return (Builder) super.clone();
}
@java.lang.Override
public Builder setField(
com.google.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return (Builder) super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
com.google.protobuf.Descriptors.FieldDescriptor field) {
return (Builder) super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
com.google.protobuf.Descriptors.OneofDescriptor oneof) {
return (Builder) super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
com.google.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return (Builder) super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
com.google.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return (Builder) super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.tensorflow.framework.ConfigProto) {
return mergeFrom((org.tensorflow.framework.ConfigProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.tensorflow.framework.ConfigProto other) {
if (other == org.tensorflow.framework.ConfigProto.getDefaultInstance()) return this;
internalGetMutableDeviceCount().mergeFrom(
other.internalGetDeviceCount());
if (other.getIntraOpParallelismThreads() != 0) {
setIntraOpParallelismThreads(other.getIntraOpParallelismThreads());
}
if (other.getInterOpParallelismThreads() != 0) {
setInterOpParallelismThreads(other.getInterOpParallelismThreads());
}
if (other.getUsePerSessionThreads() != false) {
setUsePerSessionThreads(other.getUsePerSessionThreads());
}
if (sessionInterOpThreadPoolBuilder_ == null) {
if (!other.sessionInterOpThreadPool_.isEmpty()) {
if (sessionInterOpThreadPool_.isEmpty()) {
sessionInterOpThreadPool_ = other.sessionInterOpThreadPool_;
bitField0_ = (bitField0_ & ~0x00000010);
} else {
ensureSessionInterOpThreadPoolIsMutable();
sessionInterOpThreadPool_.addAll(other.sessionInterOpThreadPool_);
}
onChanged();
}
} else {
if (!other.sessionInterOpThreadPool_.isEmpty()) {
if (sessionInterOpThreadPoolBuilder_.isEmpty()) {
sessionInterOpThreadPoolBuilder_.dispose();
sessionInterOpThreadPoolBuilder_ = null;
sessionInterOpThreadPool_ = other.sessionInterOpThreadPool_;
bitField0_ = (bitField0_ & ~0x00000010);
sessionInterOpThreadPoolBuilder_ =
com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
getSessionInterOpThreadPoolFieldBuilder() : null;
} else {
sessionInterOpThreadPoolBuilder_.addAllMessages(other.sessionInterOpThreadPool_);
}
}
}
if (other.getPlacementPeriod() != 0) {
setPlacementPeriod(other.getPlacementPeriod());
}
if (!other.deviceFilters_.isEmpty()) {
if (deviceFilters_.isEmpty()) {
deviceFilters_ = other.deviceFilters_;
bitField0_ = (bitField0_ & ~0x00000040);
} else {
ensureDeviceFiltersIsMutable();
deviceFilters_.addAll(other.deviceFilters_);
}
onChanged();
}
if (other.hasGpuOptions()) {
mergeGpuOptions(other.getGpuOptions());
}
if (other.getAllowSoftPlacement() != false) {
setAllowSoftPlacement(other.getAllowSoftPlacement());
}
if (other.getLogDevicePlacement() != false) {
setLogDevicePlacement(other.getLogDevicePlacement());
}
if (other.hasGraphOptions()) {
mergeGraphOptions(other.getGraphOptions());
}
if (other.getOperationTimeoutInMs() != 0L) {
setOperationTimeoutInMs(other.getOperationTimeoutInMs());
}
if (other.hasRpcOptions()) {
mergeRpcOptions(other.getRpcOptions());
}
if (other.hasClusterDef()) {
mergeClusterDef(other.getClusterDef());
}
if (other.getIsolateSessionState() != false) {
setIsolateSessionState(other.getIsolateSessionState());
}
if (other.hasExperimental()) {
mergeExperimental(other.getExperimental());
}
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
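// Merge semantics sketch (illustrative): mergeFrom(other) overwrites scalar fields
// only when other carries a non-default value, appends repeated fields such as
// device_filters, and recursively merges message fields such as gpu_options.
//
//   org.tensorflow.framework.ConfigProto merged = baseConfig.toBuilder()
//       .mergeFrom(overrideConfig)
//       .build();
//   // baseConfig and overrideConfig are assumed to be existing ConfigProto instances.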
@java.lang.Override
public final boolean isInitialized() {
return true;
}
@java.lang.Override
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.tensorflow.framework.ConfigProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.tensorflow.framework.ConfigProto) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
private com.google.protobuf.MapField<
java.lang.String, java.lang.Integer> deviceCount_;
private com.google.protobuf.MapField<java.lang.String, java.lang.Integer>
internalGetDeviceCount() {
if (deviceCount_ == null) {
return com.google.protobuf.MapField.emptyMapField(
DeviceCountDefaultEntryHolder.defaultEntry);
}
return deviceCount_;
}
private com.google.protobuf.MapField<java.lang.String, java.lang.Integer>
internalGetMutableDeviceCount() {
onChanged();
if (deviceCount_ == null) {
deviceCount_ = com.google.protobuf.MapField.newMapField(
DeviceCountDefaultEntryHolder.defaultEntry);
}
if (!deviceCount_.isMutable()) {
deviceCount_ = deviceCount_.copy();
}
return deviceCount_;
}
public int getDeviceCountCount() {
return internalGetDeviceCount().getMap().size();
}
/**
*
* Map from device type name (e.g., "CPU" or "GPU" ) to maximum
* number of devices of that type to use. If a particular device
* type is not found in the map, the system picks an appropriate
* number.
*
*
* map<string, int32> device_count = 1;
*/
public boolean containsDeviceCount(
java.lang.String key) {
if (key == null) { throw new java.lang.NullPointerException(); }
return internalGetDeviceCount().getMap().containsKey(key);
}
/**
* Use {@link #getDeviceCountMap()} instead.
*/
@java.lang.Deprecated
public java.util.Map<java.lang.String, java.lang.Integer> getDeviceCount() {
return getDeviceCountMap();
}
/**
*
* Map from device type name (e.g., "CPU" or "GPU" ) to maximum
* number of devices of that type to use. If a particular device
* type is not found in the map, the system picks an appropriate
* number.
*
*
* map<string, int32> device_count = 1;
*/
public java.util.Map<java.lang.String, java.lang.Integer> getDeviceCountMap() {
return internalGetDeviceCount().getMap();
}
/**
*
* Map from device type name (e.g., "CPU" or "GPU" ) to maximum
* number of devices of that type to use. If a particular device
* type is not found in the map, the system picks an appropriate
* number.
*
*
* map<string, int32> device_count = 1;
*/
public int getDeviceCountOrDefault(
java.lang.String key,
int defaultValue) {
if (key == null) { throw new java.lang.NullPointerException(); }
java.util.Map<java.lang.String, java.lang.Integer> map =
internalGetDeviceCount().getMap();
return map.containsKey(key) ? map.get(key) : defaultValue;
}
/**
*
* Map from device type name (e.g., "CPU" or "GPU" ) to maximum
* number of devices of that type to use. If a particular device
* type is not found in the map, the system picks an appropriate
* number.
*
*
* map<string, int32> device_count = 1;
*/
public int getDeviceCountOrThrow(
java.lang.String key) {
if (key == null) { throw new java.lang.NullPointerException(); }
java.util.Map<java.lang.String, java.lang.Integer> map =
internalGetDeviceCount().getMap();
if (!map.containsKey(key)) {
throw new java.lang.IllegalArgumentException();
}
return map.get(key);
}
public Builder clearDeviceCount() {
internalGetMutableDeviceCount().getMutableMap()
.clear();
return this;
}
/**
*
* Map from device type name (e.g., "CPU" or "GPU" ) to maximum
* number of devices of that type to use. If a particular device
* type is not found in the map, the system picks an appropriate
* number.
*
*
* map<string, int32> device_count = 1;
*/
public Builder removeDeviceCount(
java.lang.String key) {
if (key == null) { throw new java.lang.NullPointerException(); }
internalGetMutableDeviceCount().getMutableMap()
.remove(key);
return this;
}
/**
* Use alternate mutation accessors instead.
*/
@java.lang.Deprecated
public java.util.Map<java.lang.String, java.lang.Integer>
getMutableDeviceCount() {
return internalGetMutableDeviceCount().getMutableMap();
}
/**
*
* Map from device type name (e.g., "CPU" or "GPU" ) to maximum
* number of devices of that type to use. If a particular device
* type is not found in the map, the system picks an appropriate
* number.
*
*
* map<string, int32> device_count = 1;
*/
public Builder putDeviceCount(
java.lang.String key,
int value) {
if (key == null) { throw new java.lang.NullPointerException(); }
internalGetMutableDeviceCount().getMutableMap()
.put(key, value);
return this;
}
/**
*
* Map from device type name (e.g., "CPU" or "GPU" ) to maximum
* number of devices of that type to use. If a particular device
* type is not found in the map, the system picks an appropriate
* number.
*
*
* map<string, int32> device_count = 1;
*/
public Builder putAllDeviceCount(
java.util.Map<java.lang.String, java.lang.Integer> values) {
internalGetMutableDeviceCount().getMutableMap()
.putAll(values);
return this;
}
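// Usage sketch for the device_count map (illustrative values): cap the session at
// one CPU device and disable GPU devices entirely.
//
//   org.tensorflow.framework.ConfigProto config =
//       org.tensorflow.framework.ConfigProto.newBuilder()
//           .putDeviceCount("CPU", 1)
//           .putDeviceCount("GPU", 0)
//           .build();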
private int intraOpParallelismThreads_ ;
/**
*
* The execution of an individual op (for some op types) can be
* parallelized on a pool of intra_op_parallelism_threads.
* 0 means the system picks an appropriate number.
*
*
* int32 intra_op_parallelism_threads = 2;
*/
public int getIntraOpParallelismThreads() {
return intraOpParallelismThreads_;
}
/**
*
* The execution of an individual op (for some op types) can be
* parallelized on a pool of intra_op_parallelism_threads.
* 0 means the system picks an appropriate number.
*
*
* int32 intra_op_parallelism_threads = 2;
*/
public Builder setIntraOpParallelismThreads(int value) {
intraOpParallelismThreads_ = value;
onChanged();
return this;
}
/**
*
* The execution of an individual op (for some op types) can be
* parallelized on a pool of intra_op_parallelism_threads.
* 0 means the system picks an appropriate number.
*
*
* int32 intra_op_parallelism_threads = 2;
*/
public Builder clearIntraOpParallelismThreads() {
intraOpParallelismThreads_ = 0;
onChanged();
return this;
}
private int interOpParallelismThreads_ ;
/**
*
* Nodes that perform blocking operations are enqueued on a pool of
* inter_op_parallelism_threads available in each process.
* 0 means the system picks an appropriate number.
* Note that the first Session created in the process sets the
* number of threads for all future sessions unless use_per_session_threads is
* true or session_inter_op_thread_pool is configured.
*
*
* int32 inter_op_parallelism_threads = 5;
*/
public int getInterOpParallelismThreads() {
return interOpParallelismThreads_;
}
/**
*
* Nodes that perform blocking operations are enqueued on a pool of
* inter_op_parallelism_threads available in each process.
* 0 means the system picks an appropriate number.
* Note that the first Session created in the process sets the
* number of threads for all future sessions unless use_per_session_threads is
* true or session_inter_op_thread_pool is configured.
*
*
* int32 inter_op_parallelism_threads = 5;
*/
public Builder setInterOpParallelismThreads(int value) {
interOpParallelismThreads_ = value;
onChanged();
return this;
}
/**
*
* Nodes that perform blocking operations are enqueued on a pool of
* inter_op_parallelism_threads available in each process.
* 0 means the system picks an appropriate number.
* Note that the first Session created in the process sets the
* number of threads for all future sessions unless use_per_session_threads is
* true or session_inter_op_thread_pool is configured.
*
*
* int32 inter_op_parallelism_threads = 5;
*/
public Builder clearInterOpParallelismThreads() {
interOpParallelismThreads_ = 0;
onChanged();
return this;
}
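// Usage sketch for the two parallelism knobs above (illustrative values): a value
// of 0 leaves the choice to the runtime, so set these only when explicit limits
// are needed.
//
//   org.tensorflow.framework.ConfigProto config =
//       org.tensorflow.framework.ConfigProto.newBuilder()
//           .setIntraOpParallelismThreads(4)   // threads inside a single op
//           .setInterOpParallelismThreads(2)   // ops executing concurrently
//           .build();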
private boolean usePerSessionThreads_ ;
/**
*
* If true, use a new set of threads for this session rather than the global
* pool of threads. Only supported by direct sessions.
* If false, use the global threads created by the first session, or the
* per-session thread pools configured by session_inter_op_thread_pool.
* This option is deprecated. The same effect can be achieved by setting
* session_inter_op_thread_pool to have one element, whose num_threads equals
* inter_op_parallelism_threads.
*
*
* bool use_per_session_threads = 9;
*/
public boolean getUsePerSessionThreads() {
return usePerSessionThreads_;
}
/**
*
* If true, use a new set of threads for this session rather than the global
* pool of threads. Only supported by direct sessions.
* If false, use the global threads created by the first session, or the
* per-session thread pools configured by session_inter_op_thread_pool.
* This option is deprecated. The same effect can be achieved by setting
* session_inter_op_thread_pool to have one element, whose num_threads equals
* inter_op_parallelism_threads.
*
*
* bool use_per_session_threads = 9;
*/
public Builder setUsePerSessionThreads(boolean value) {
usePerSessionThreads_ = value;
onChanged();
return this;
}
/**
*
* If true, use a new set of threads for this session rather than the global
* pool of threads. Only supported by direct sessions.
* If false, use the global threads created by the first session, or the
* per-session thread pools configured by session_inter_op_thread_pool.
* This option is deprecated. The same effect can be achieved by setting
* session_inter_op_thread_pool to have one element, whose num_threads equals
* inter_op_parallelism_threads.
*
*
* bool use_per_session_threads = 9;
*/
public Builder clearUsePerSessionThreads() {
usePerSessionThreads_ = false;
onChanged();
return this;
}
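// As the comment above notes, use_per_session_threads is deprecated. A rough
// equivalent is a single-element session_inter_op_thread_pool (sketch, assuming
// the generated ThreadPoolOptionProto.Builder exposes setNumThreads for its
// num_threads field):
//
//   org.tensorflow.framework.ConfigProto config =
//       org.tensorflow.framework.ConfigProto.newBuilder()
//           .addSessionInterOpThreadPool(
//               org.tensorflow.framework.ThreadPoolOptionProto.newBuilder()
//                   .setNumThreads(8)   // matches the intended inter_op_parallelism_threads
//                   .build())
//           .build();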
private java.util.List<org.tensorflow.framework.ThreadPoolOptionProto> sessionInterOpThreadPool_ =
java.util.Collections.emptyList();
private void ensureSessionInterOpThreadPoolIsMutable() {
if (!((bitField0_ & 0x00000010) == 0x00000010)) {
sessionInterOpThreadPool_ = new java.util.ArrayList<org.tensorflow.framework.ThreadPoolOptionProto>(sessionInterOpThreadPool_);
bitField0_ |= 0x00000010;
}
}
private com.google.protobuf.RepeatedFieldBuilderV3<
org.tensorflow.framework.ThreadPoolOptionProto, org.tensorflow.framework.ThreadPoolOptionProto.Builder, org.tensorflow.framework.ThreadPoolOptionProtoOrBuilder> sessionInterOpThreadPoolBuilder_;
/**
*
* This option is experimental - it may be replaced with a different mechanism
* in the future.
* Configures session thread pools. If this is configured, then RunOptions for
* a Run call can select the thread pool to use.
* The intended use is for when some session invocations need to run in a
* background pool limited to a small number of threads:
* - For example, a session may be configured to have one large pool (for
* regular compute) and one small pool (for periodic, low priority work);
* using the small pool is currently the mechanism for limiting the inter-op
* parallelism of the low priority work. Note that it does not limit the
* parallelism of work spawned by a single op kernel implementation.
* - Using this setting is normally not needed in training, but may help some
* serving use cases.
* - It is also generally recommended to set the global_name field of this
* proto, to avoid creating multiple large pools. It is typically better to
* run the non-low-priority work, even across sessions, in a single large
* pool.
*
*
* repeated .tensorflow.ThreadPoolOptionProto session_inter_op_thread_pool = 12;
*/
public java.util.List<org.tensorflow.framework.ThreadPoolOptionProto> getSessionInterOpThreadPoolList() {
if (sessionInterOpThreadPoolBuilder_ == null) {
return java.util.Collections.unmodifiableList(sessionInterOpThreadPool_);
} else {
return sessionInterOpThreadPoolBuilder_.getMessageList();
}
}
/**
*
* This option is experimental - it may be replaced with a different mechanism
* in the future.
* Configures session thread pools. If this is configured, then RunOptions for
* a Run call can select the thread pool to use.
* The intended use is for when some session invocations need to run in a
* background pool limited to a small number of threads:
* - For example, a session may be configured to have one large pool (for
* regular compute) and one small pool (for periodic, low priority work);
* using the small pool is currently the mechanism for limiting the inter-op
* parallelism of the low priority work. Note that it does not limit the
* parallelism of work spawned by a single op kernel implementation.
* - Using this setting is normally not needed in training, but may help some
* serving use cases.
* - It is also generally recommended to set the global_name field of this
* proto, to avoid creating multiple large pools. It is typically better to
* run the non-low-priority work, even across sessions, in a single large
* pool.
*
*
* repeated .tensorflow.ThreadPoolOptionProto session_inter_op_thread_pool = 12;
*/
public int getSessionInterOpThreadPoolCount() {
if (sessionInterOpThreadPoolBuilder_ == null) {
return sessionInterOpThreadPool_.size();
} else {
return sessionInterOpThreadPoolBuilder_.getCount();
}
}
/**
*
* This option is experimental - it may be replaced with a different mechanism
* in the future.
* Configures session thread pools. If this is configured, then RunOptions for
* a Run call can select the thread pool to use.
* The intended use is for when some session invocations need to run in a
* background pool limited to a small number of threads:
* - For example, a session may be configured to have one large pool (for
* regular compute) and one small pool (for periodic, low priority work);
* using the small pool is currently the mechanism for limiting the inter-op
* parallelism of the low priority work. Note that it does not limit the
* parallelism of work spawned by a single op kernel implementation.
* - Using this setting is normally not needed in training, but may help some
* serving use cases.
* - It is also generally recommended to set the global_name field of this
* proto, to avoid creating multiple large pools. It is typically better to
* run the non-low-priority work, even across sessions, in a single large
* pool.
*
*
* repeated .tensorflow.ThreadPoolOptionProto session_inter_op_thread_pool = 12;
*/
public org.tensorflow.framework.ThreadPoolOptionProto getSessionInterOpThreadPool(int index) {
if (sessionInterOpThreadPoolBuilder_ == null) {
return sessionInterOpThreadPool_.get(index);
} else {
return sessionInterOpThreadPoolBuilder_.getMessage(index);
}
}
/**
*
* This option is experimental - it may be replaced with a different mechanism
* in the future.
* Configures session thread pools. If this is configured, then RunOptions for
* a Run call can select the thread pool to use.
* The intended use is for when some session invocations need to run in a
* background pool limited to a small number of threads:
* - For example, a session may be configured to have one large pool (for
* regular compute) and one small pool (for periodic, low priority work);
* using the small pool is currently the mechanism for limiting the inter-op
* parallelism of the low priority work. Note that it does not limit the
* parallelism of work spawned by a single op kernel implementation.
* - Using this setting is normally not needed in training, but may help some
* serving use cases.
* - It is also generally recommended to set the global_name field of this
* proto, to avoid creating multiple large pools. It is typically better to
* run the non-low-priority work, even across sessions, in a single large
* pool.
*
*
* repeated .tensorflow.ThreadPoolOptionProto session_inter_op_thread_pool = 12;
*/
public Builder setSessionInterOpThreadPool(
int index, org.tensorflow.framework.ThreadPoolOptionProto value) {
if (sessionInterOpThreadPoolBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureSessionInterOpThreadPoolIsMutable();
sessionInterOpThreadPool_.set(index, value);
onChanged();
} else {
sessionInterOpThreadPoolBuilder_.setMessage(index, value);
}
return this;
}
/**
*
* This option is experimental - it may be replaced with a different mechanism
* in the future.
* Configures session thread pools. If this is configured, then RunOptions for
* a Run call can select the thread pool to use.
* The intended use is for when some session invocations need to run in a
* background pool limited to a small number of threads:
* - For example, a session may be configured to have one large pool (for
* regular compute) and one small pool (for periodic, low priority work);
* using the small pool is currently the mechanism for limiting the inter-op
* parallelism of the low priority work. Note that it does not limit the
* parallelism of work spawned by a single op kernel implementation.
* - Using this setting is normally not needed in training, but may help some
* serving use cases.
* - It is also generally recommended to set the global_name field of this
* proto, to avoid creating multiple large pools. It is typically better to
* run the non-low-priority work, even across sessions, in a single large
* pool.
*
*
* repeated .tensorflow.ThreadPoolOptionProto session_inter_op_thread_pool = 12;
*/
public Builder setSessionInterOpThreadPool(
int index, org.tensorflow.framework.ThreadPoolOptionProto.Builder builderForValue) {
if (sessionInterOpThreadPoolBuilder_ == null) {
ensureSessionInterOpThreadPoolIsMutable();
sessionInterOpThreadPool_.set(index, builderForValue.build());
onChanged();
} else {
sessionInterOpThreadPoolBuilder_.setMessage(index, builderForValue.build());
}
return this;
}
/**
*
* This option is experimental - it may be replaced with a different mechanism
* in the future.
* Configures session thread pools. If this is configured, then RunOptions for
* a Run call can select the thread pool to use.
* The intended use is for when some session invocations need to run in a
* background pool limited to a small number of threads:
* - For example, a session may be configured to have one large pool (for
* regular compute) and one small pool (for periodic, low priority work);
* using the small pool is currently the mechanism for limiting the inter-op
* parallelism of the low priority work. Note that it does not limit the
* parallelism of work spawned by a single op kernel implementation.
* - Using this setting is normally not needed in training, but may help some
* serving use cases.
* - It is also generally recommended to set the global_name field of this
* proto, to avoid creating multiple large pools. It is typically better to
* run the non-low-priority work, even across sessions, in a single large
* pool.
*
*
* repeated .tensorflow.ThreadPoolOptionProto session_inter_op_thread_pool = 12;
*/
public Builder addSessionInterOpThreadPool(org.tensorflow.framework.ThreadPoolOptionProto value) {
if (sessionInterOpThreadPoolBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureSessionInterOpThreadPoolIsMutable();
sessionInterOpThreadPool_.add(value);
onChanged();
} else {
sessionInterOpThreadPoolBuilder_.addMessage(value);
}
return this;
}
/**
*
* This option is experimental - it may be replaced with a different mechanism
* in the future.
* Configures session thread pools. If this is configured, then RunOptions for
* a Run call can select the thread pool to use.
* The intended use is for when some session invocations need to run in a
* background pool limited to a small number of threads:
* - For example, a session may be configured to have one large pool (for
* regular compute) and one small pool (for periodic, low priority work);
* using the small pool is currently the mechanism for limiting the inter-op
* parallelism of the low priority work. Note that it does not limit the
* parallelism of work spawned by a single op kernel implementation.
* - Using this setting is normally not needed in training, but may help some
* serving use cases.
* - It is also generally recommended to set the global_name field of this
* proto, to avoid creating multiple large pools. It is typically better to
* run the non-low-priority work, even across sessions, in a single large
* pool.
*
*
* repeated .tensorflow.ThreadPoolOptionProto session_inter_op_thread_pool = 12;
*/
public Builder addSessionInterOpThreadPool(
int index, org.tensorflow.framework.ThreadPoolOptionProto value) {
if (sessionInterOpThreadPoolBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureSessionInterOpThreadPoolIsMutable();
sessionInterOpThreadPool_.add(index, value);
onChanged();
} else {
sessionInterOpThreadPoolBuilder_.addMessage(index, value);
}
return this;
}
/**
*
* This option is experimental - it may be replaced with a different mechanism
* in the future.
* Configures session thread pools. If this is configured, then RunOptions for
* a Run call can select the thread pool to use.
* The intended use is for when some session invocations need to run in a
* background pool limited to a small number of threads:
* - For example, a session may be configured to have one large pool (for
* regular compute) and one small pool (for periodic, low priority work);
* using the small pool is currently the mechanism for limiting the inter-op
* parallelism of the low priority work. Note that it does not limit the
* parallelism of work spawned by a single op kernel implementation.
* - Using this setting is normally not needed in training, but may help some
* serving use cases.
* - It is also generally recommended to set the global_name field of this
* proto, to avoid creating multiple large pools. It is typically better to
* run the non-low-priority work, even across sessions, in a single large
* pool.
*
*
* repeated .tensorflow.ThreadPoolOptionProto session_inter_op_thread_pool = 12;
*/
public Builder addSessionInterOpThreadPool(
org.tensorflow.framework.ThreadPoolOptionProto.Builder builderForValue) {
if (sessionInterOpThreadPoolBuilder_ == null) {
ensureSessionInterOpThreadPoolIsMutable();
sessionInterOpThreadPool_.add(builderForValue.build());
onChanged();
} else {
sessionInterOpThreadPoolBuilder_.addMessage(builderForValue.build());
}
return this;
}
/**
*
* This option is experimental - it may be replaced with a different mechanism
* in the future.
* Configures session thread pools. If this is configured, then RunOptions for
* a Run call can select the thread pool to use.
* The intended use is for when some session invocations need to run in a
* background pool limited to a small number of threads:
* - For example, a session may be configured to have one large pool (for
* regular compute) and one small pool (for periodic, low priority work);
* using the small pool is currently the mechanism for limiting the inter-op
* parallelism of the low priority work. Note that it does not limit the
* parallelism of work spawned by a single op kernel implementation.
* - Using this setting is normally not needed in training, but may help some
* serving use cases.
* - It is also generally recommended to set the global_name field of this
* proto, to avoid creating multiple large pools. It is typically better to
* run the non-low-priority work, even across sessions, in a single large
* pool.
*
*
* repeated .tensorflow.ThreadPoolOptionProto session_inter_op_thread_pool = 12;
*/
public Builder addSessionInterOpThreadPool(
int index, org.tensorflow.framework.ThreadPoolOptionProto.Builder builderForValue) {
if (sessionInterOpThreadPoolBuilder_ == null) {
ensureSessionInterOpThreadPoolIsMutable();
sessionInterOpThreadPool_.add(index, builderForValue.build());
onChanged();
} else {
sessionInterOpThreadPoolBuilder_.addMessage(index, builderForValue.build());
}
return this;
}
/**
*
* This option is experimental - it may be replaced with a different mechanism
* in the future.
* Configures session thread pools. If this is configured, then RunOptions for
* a Run call can select the thread pool to use.
* The intended use is for when some session invocations need to run in a
* background pool limited to a small number of threads:
* - For example, a session may be configured to have one large pool (for
* regular compute) and one small pool (for periodic, low priority work);
* using the small pool is currently the mechanism for limiting the inter-op
* parallelism of the low priority work. Note that it does not limit the
* parallelism of work spawned by a single op kernel implementation.
* - Using this setting is normally not needed in training, but may help some
* serving use cases.
* - It is also generally recommended to set the global_name field of this
* proto, to avoid creating multiple large pools. It is typically better to
* run the non-low-priority work, even across sessions, in a single large
* pool.
*
*
* repeated .tensorflow.ThreadPoolOptionProto session_inter_op_thread_pool = 12;
*/
public Builder addAllSessionInterOpThreadPool(
java.lang.Iterable<? extends org.tensorflow.framework.ThreadPoolOptionProto> values) {
if (sessionInterOpThreadPoolBuilder_ == null) {
ensureSessionInterOpThreadPoolIsMutable();
com.google.protobuf.AbstractMessageLite.Builder.addAll(
values, sessionInterOpThreadPool_);
onChanged();
} else {
sessionInterOpThreadPoolBuilder_.addAllMessages(values);
}
return this;
}
/**
*
* This option is experimental - it may be replaced with a different mechanism
* in the future.
* Configures session thread pools. If this is configured, then RunOptions for
* a Run call can select the thread pool to use.
* The intended use is for when some session invocations need to run in a
* background pool limited to a small number of threads:
* - For example, a session may be configured to have one large pool (for
* regular compute) and one small pool (for periodic, low priority work);
* using the small pool is currently the mechanism for limiting the inter-op
* parallelism of the low priority work. Note that it does not limit the
* parallelism of work spawned by a single op kernel implementation.
* - Using this setting is normally not needed in training, but may help some
* serving use cases.
* - It is also generally recommended to set the global_name field of this
* proto, to avoid creating multiple large pools. It is typically better to
* run the non-low-priority work, even across sessions, in a single large
* pool.
*
*
* repeated .tensorflow.ThreadPoolOptionProto session_inter_op_thread_pool = 12;
*/
public Builder clearSessionInterOpThreadPool() {
if (sessionInterOpThreadPoolBuilder_ == null) {
sessionInterOpThreadPool_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000010);
onChanged();
} else {
sessionInterOpThreadPoolBuilder_.clear();
}
return this;
}
/**
*
* This option is experimental - it may be replaced with a different mechanism
* in the future.
* Configures session thread pools. If this is configured, then RunOptions for
* a Run call can select the thread pool to use.
* The intended use is for when some session invocations need to run in a
* background pool limited to a small number of threads:
* - For example, a session may be configured to have one large pool (for
* regular compute) and one small pool (for periodic, low priority work);
* using the small pool is currently the mechanism for limiting the inter-op
* parallelism of the low priority work. Note that it does not limit the
* parallelism of work spawned by a single op kernel implementation.
* - Using this setting is normally not needed in training, but may help some
* serving use cases.
* - It is also generally recommended to set the global_name field of this
* proto, to avoid creating multiple large pools. It is typically better to
* run the non-low-priority work, even across sessions, in a single large
* pool.
*
*
* repeated .tensorflow.ThreadPoolOptionProto session_inter_op_thread_pool = 12;
*/
public Builder removeSessionInterOpThreadPool(int index) {
if (sessionInterOpThreadPoolBuilder_ == null) {
ensureSessionInterOpThreadPoolIsMutable();
sessionInterOpThreadPool_.remove(index);
onChanged();
} else {
sessionInterOpThreadPoolBuilder_.remove(index);
}
return this;
}
/**
*
* This option is experimental - it may be replaced with a different mechanism
* in the future.
* Configures session thread pools. If this is configured, then RunOptions for
* a Run call can select the thread pool to use.
* The intended use is for when some session invocations need to run in a
* background pool limited to a small number of threads:
* - For example, a session may be configured to have one large pool (for
* regular compute) and one small pool (for periodic, low priority work);
* using the small pool is currently the mechanism for limiting the inter-op
* parallelism of the low priority work. Note that it does not limit the
* parallelism of work spawned by a single op kernel implementation.
* - Using this setting is normally not needed in training, but may help some
* serving use cases.
* - It is also generally recommended to set the global_name field of this
* proto, to avoid creating multiple large pools. It is typically better to
* run the non-low-priority work, even across sessions, in a single large
* pool.
*
*
* repeated .tensorflow.ThreadPoolOptionProto session_inter_op_thread_pool = 12;
*/
public org.tensorflow.framework.ThreadPoolOptionProto.Builder getSessionInterOpThreadPoolBuilder(
int index) {
return getSessionInterOpThreadPoolFieldBuilder().getBuilder(index);
}
/**
*
* This option is experimental - it may be replaced with a different mechanism
* in the future.
* Configures session thread pools. If this is configured, then RunOptions for
* a Run call can select the thread pool to use.
* The intended use is for when some session invocations need to run in a
* background pool limited to a small number of threads:
* - For example, a session may be configured to have one large pool (for
* regular compute) and one small pool (for periodic, low priority work);
* using the small pool is currently the mechanism for limiting the inter-op
* parallelism of the low priority work. Note that it does not limit the
* parallelism of work spawned by a single op kernel implementation.
* - Using this setting is normally not needed in training, but may help some
* serving use cases.
* - It is also generally recommended to set the global_name field of this
* proto, to avoid creating multiple large pools. It is typically better to
* run the non-low-priority work, even across sessions, in a single large
* pool.
*
*
* repeated .tensorflow.ThreadPoolOptionProto session_inter_op_thread_pool = 12;
*/
public org.tensorflow.framework.ThreadPoolOptionProtoOrBuilder getSessionInterOpThreadPoolOrBuilder(
int index) {
if (sessionInterOpThreadPoolBuilder_ == null) {
return sessionInterOpThreadPool_.get(index); } else {
return sessionInterOpThreadPoolBuilder_.getMessageOrBuilder(index);
}
}
/**
*
* This option is experimental - it may be replaced with a different mechanism
* in the future.
* Configures session thread pools. If this is configured, then RunOptions for
* a Run call can select the thread pool to use.
* The intended use is for when some session invocations need to run in a
* background pool limited to a small number of threads:
* - For example, a session may be configured to have one large pool (for
* regular compute) and one small pool (for periodic, low priority work);
* using the small pool is currently the mechanism for limiting the inter-op
* parallelism of the low priority work. Note that it does not limit the
* parallelism of work spawned by a single op kernel implementation.
* - Using this setting is normally not needed in training, but may help some
* serving use cases.
* - It is also generally recommended to set the global_name field of this
* proto, to avoid creating multiple large pools. It is typically better to
* run the non-low-priority work, even across sessions, in a single large
* pool.
*
*
* repeated .tensorflow.ThreadPoolOptionProto session_inter_op_thread_pool = 12;
*/
public java.util.List<? extends org.tensorflow.framework.ThreadPoolOptionProtoOrBuilder>
getSessionInterOpThreadPoolOrBuilderList() {
if (sessionInterOpThreadPoolBuilder_ != null) {
return sessionInterOpThreadPoolBuilder_.getMessageOrBuilderList();
} else {
return java.util.Collections.unmodifiableList(sessionInterOpThreadPool_);
}
}
/**
*
* This option is experimental - it may be replaced with a different mechanism
* in the future.
* Configures session thread pools. If this is configured, then RunOptions for
* a Run call can select the thread pool to use.
* The intended use is for when some session invocations need to run in a
* background pool limited to a small number of threads:
* - For example, a session may be configured to have one large pool (for
* regular compute) and one small pool (for periodic, low priority work);
* using the small pool is currently the mechanism for limiting the inter-op
* parallelism of the low priority work. Note that it does not limit the
* parallelism of work spawned by a single op kernel implementation.
* - Using this setting is normally not needed in training, but may help some
* serving use cases.
* - It is also generally recommended to set the global_name field of this
* proto, to avoid creating multiple large pools. It is typically better to
* run the non-low-priority work, even across sessions, in a single large
* pool.
*
*
* repeated .tensorflow.ThreadPoolOptionProto session_inter_op_thread_pool = 12;
*/
public org.tensorflow.framework.ThreadPoolOptionProto.Builder addSessionInterOpThreadPoolBuilder() {
return getSessionInterOpThreadPoolFieldBuilder().addBuilder(
org.tensorflow.framework.ThreadPoolOptionProto.getDefaultInstance());
}
/**
*
* This option is experimental - it may be replaced with a different mechanism
* in the future.
* Configures session thread pools. If this is configured, then RunOptions for
* a Run call can select the thread pool to use.
* The intended use is for when some session invocations need to run in a
* background pool limited to a small number of threads:
* - For example, a session may be configured to have one large pool (for
* regular compute) and one small pool (for periodic, low priority work);
* using the small pool is currently the mechanism for limiting the inter-op
* parallelism of the low priority work. Note that it does not limit the
* parallelism of work spawned by a single op kernel implementation.
* - Using this setting is normally not needed in training, but may help some
* serving use cases.
* - It is also generally recommended to set the global_name field of this
* proto, to avoid creating multiple large pools. It is typically better to
* run the non-low-priority work, even across sessions, in a single large
* pool.
*
*
* repeated .tensorflow.ThreadPoolOptionProto session_inter_op_thread_pool = 12;
*/
public org.tensorflow.framework.ThreadPoolOptionProto.Builder addSessionInterOpThreadPoolBuilder(
int index) {
return getSessionInterOpThreadPoolFieldBuilder().addBuilder(
index, org.tensorflow.framework.ThreadPoolOptionProto.getDefaultInstance());
}
/**
*
* This option is experimental - it may be replaced with a different mechanism
* in the future.
* Configures session thread pools. If this is configured, then RunOptions for
* a Run call can select the thread pool to use.
* The intended use is for when some session invocations need to run in a
* background pool limited to a small number of threads:
* - For example, a session may be configured to have one large pool (for
* regular compute) and one small pool (for periodic, low priority work);
* using the small pool is currently the mechanism for limiting the inter-op
* parallelism of the low priority work. Note that it does not limit the
* parallelism of work spawned by a single op kernel implementation.
* - Using this setting is normally not needed in training, but may help some
* serving use cases.
* - It is also generally recommended to set the global_name field of this
* proto, to avoid creating multiple large pools. It is typically better to
* run the non-low-priority work, even across sessions, in a single large
* pool.
*
*
* repeated .tensorflow.ThreadPoolOptionProto session_inter_op_thread_pool = 12;
*/
public java.util.List<org.tensorflow.framework.ThreadPoolOptionProto.Builder>
getSessionInterOpThreadPoolBuilderList() {
return getSessionInterOpThreadPoolFieldBuilder().getBuilderList();
}
private com.google.protobuf.RepeatedFieldBuilderV3<
org.tensorflow.framework.ThreadPoolOptionProto, org.tensorflow.framework.ThreadPoolOptionProto.Builder, org.tensorflow.framework.ThreadPoolOptionProtoOrBuilder>
getSessionInterOpThreadPoolFieldBuilder() {
if (sessionInterOpThreadPoolBuilder_ == null) {
sessionInterOpThreadPoolBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3<
org.tensorflow.framework.ThreadPoolOptionProto, org.tensorflow.framework.ThreadPoolOptionProto.Builder, org.tensorflow.framework.ThreadPoolOptionProtoOrBuilder>(
sessionInterOpThreadPool_,
((bitField0_ & 0x00000010) == 0x00000010),
getParentForChildren(),
isClean());
sessionInterOpThreadPool_ = null;
}
return sessionInterOpThreadPoolBuilder_;
}
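// Sketch of the two-pool setup described in the field comment above (one large
// shared pool for regular compute, one small pool for low-priority work). Assumes
// the generated ThreadPoolOptionProto.Builder exposes setNumThreads and
// setGlobalName for the num_threads and global_name fields mentioned there.
//
//   org.tensorflow.framework.ConfigProto config =
//       org.tensorflow.framework.ConfigProto.newBuilder()
//           .addSessionInterOpThreadPool(
//               org.tensorflow.framework.ThreadPoolOptionProto.newBuilder()
//                   .setGlobalName("shared_compute")   // reused across sessions
//                   .setNumThreads(16))
//           .addSessionInterOpThreadPool(
//               org.tensorflow.framework.ThreadPoolOptionProto.newBuilder()
//                   .setNumThreads(1))                 // small background pool
//           .build();
//   // A RunOptions message would then select pool 0 or 1 for an individual Run call.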
private int placementPeriod_ ;
/**
*
* Assignment of Nodes to Devices is recomputed every placement_period
* steps until the system warms up (at which point the recomputation
* typically slows down automatically).
*
*
* int32 placement_period = 3;
*/
public int getPlacementPeriod() {
return placementPeriod_;
}
/**
*
* Assignment of Nodes to Devices is recomputed every placement_period
* steps until the system warms up (at which point the recomputation
* typically slows down automatically).
*
*
* int32 placement_period = 3;
*/
public Builder setPlacementPeriod(int value) {
placementPeriod_ = value;
onChanged();
return this;
}
/**
*
* Assignment of Nodes to Devices is recomputed every placement_period
* steps until the system warms up (at which point the recomputation
* typically slows down automatically).
*
*
* int32 placement_period = 3;
*/
public Builder clearPlacementPeriod() {
placementPeriod_ = 0;
onChanged();
return this;
}
private com.google.protobuf.LazyStringList deviceFilters_ = com.google.protobuf.LazyStringArrayList.EMPTY;
private void ensureDeviceFiltersIsMutable() {
if (!((bitField0_ & 0x00000040) == 0x00000040)) {
deviceFilters_ = new com.google.protobuf.LazyStringArrayList(deviceFilters_);
bitField0_ |= 0x00000040;
}
}
/**
*
* When any filters are present sessions will ignore all devices which do not
* match the filters. Each filter can be partially specified, e.g. "/job:ps"
* "/job:worker/replica:3", etc.
*
*
* repeated string device_filters = 4;
*/
public com.google.protobuf.ProtocolStringList
getDeviceFiltersList() {
return deviceFilters_.getUnmodifiableView();
}
/**
*
* When any filters are present sessions will ignore all devices which do not
* match the filters. Each filter can be partially specified, e.g. "/job:ps"
* "/job:worker/replica:3", etc.
*
*
* repeated string device_filters = 4;
*/
public int getDeviceFiltersCount() {
return deviceFilters_.size();
}
/**
*
* When any filters are present sessions will ignore all devices which do not
* match the filters. Each filter can be partially specified, e.g. "/job:ps"
* "/job:worker/replica:3", etc.
*
*
* repeated string device_filters = 4;
*/
public java.lang.String getDeviceFilters(int index) {
return deviceFilters_.get(index);
}
/**
*
* When any filters are present sessions will ignore all devices which do not
* match the filters. Each filter can be partially specified, e.g. "/job:ps"
* "/job:worker/replica:3", etc.
*
*
* repeated string device_filters = 4;
*/
public com.google.protobuf.ByteString
getDeviceFiltersBytes(int index) {
return deviceFilters_.getByteString(index);
}
/**
*
* When any filters are present sessions will ignore all devices which do not
* match the filters. Each filter can be partially specified, e.g. "/job:ps"
* "/job:worker/replica:3", etc.
*
*
* repeated string device_filters = 4;
*/
public Builder setDeviceFilters(
int index, java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
ensureDeviceFiltersIsMutable();
deviceFilters_.set(index, value);
onChanged();
return this;
}
/**
*
* When any filters are present sessions will ignore all devices which do not
* match the filters. Each filter can be partially specified, e.g. "/job:ps"
* "/job:worker/replica:3", etc.
*
*
* repeated string device_filters = 4;
*/
public Builder addDeviceFilters(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
ensureDeviceFiltersIsMutable();
deviceFilters_.add(value);
onChanged();
return this;
}
/**
*
* When any filters are present sessions will ignore all devices which do not
* match the filters. Each filter can be partially specified, e.g. "/job:ps"
* "/job:worker/replica:3", etc.
*
*
* repeated string device_filters = 4;
*/
public Builder addAllDeviceFilters(
java.lang.Iterable<java.lang.String> values) {
ensureDeviceFiltersIsMutable();
com.google.protobuf.AbstractMessageLite.Builder.addAll(
values, deviceFilters_);
onChanged();
return this;
}
/**
*
* When any filters are present sessions will ignore all devices which do not
* match the filters. Each filter can be partially specified, e.g. "/job:ps"
* "/job:worker/replica:3", etc.
*
*
* repeated string device_filters = 4;
*/
public Builder clearDeviceFilters() {
deviceFilters_ = com.google.protobuf.LazyStringArrayList.EMPTY;
bitField0_ = (bitField0_ & ~0x00000040);
onChanged();
return this;
}
/**
*
* When any filters are present sessions will ignore all devices which do not
* match the filters. Each filter can be partially specified, e.g. "/job:ps"
* "/job:worker/replica:3", etc.
*
*
* repeated string device_filters = 4;
*/
public Builder addDeviceFiltersBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
checkByteStringIsUtf8(value);
ensureDeviceFiltersIsMutable();
deviceFilters_.add(value);
onChanged();
return this;
}
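// Usage sketch for device_filters (illustrative): restrict this session to the
// parameter-server job and a single worker replica, using the filter syntax from
// the comments above.
//
//   org.tensorflow.framework.ConfigProto config =
//       org.tensorflow.framework.ConfigProto.newBuilder()
//           .addDeviceFilters("/job:ps")
//           .addDeviceFilters("/job:worker/replica:3")
//           .build();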
private org.tensorflow.framework.GPUOptions gpuOptions_ = null;
private com.google.protobuf.SingleFieldBuilderV3<
org.tensorflow.framework.GPUOptions, org.tensorflow.framework.GPUOptions.Builder, org.tensorflow.framework.GPUOptionsOrBuilder> gpuOptionsBuilder_;
/**
*
* Options that apply to all GPUs.
*
*
* .tensorflow.GPUOptions gpu_options = 6;
*/
public boolean hasGpuOptions() {
return gpuOptionsBuilder_ != null || gpuOptions_ != null;
}
/**
*
* Options that apply to all GPUs.
*
*
* .tensorflow.GPUOptions gpu_options = 6;
*/
public org.tensorflow.framework.GPUOptions getGpuOptions() {
if (gpuOptionsBuilder_ == null) {
return gpuOptions_ == null ? org.tensorflow.framework.GPUOptions.getDefaultInstance() : gpuOptions_;
} else {
return gpuOptionsBuilder_.getMessage();
}
}
/**
*
* Options that apply to all GPUs.
*
*
* .tensorflow.GPUOptions gpu_options = 6;
*/
public Builder setGpuOptions(org.tensorflow.framework.GPUOptions value) {
if (gpuOptionsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
gpuOptions_ = value;
onChanged();
} else {
gpuOptionsBuilder_.setMessage(value);
}
return this;
}
/**
*
* Options that apply to all GPUs.
*
*
* .tensorflow.GPUOptions gpu_options = 6;
*/
public Builder setGpuOptions(
org.tensorflow.framework.GPUOptions.Builder builderForValue) {
if (gpuOptionsBuilder_ == null) {
gpuOptions_ = builderForValue.build();
onChanged();
} else {
gpuOptionsBuilder_.setMessage(builderForValue.build());
}
return this;
}
/**
*
* Options that apply to all GPUs.
*
*
* .tensorflow.GPUOptions gpu_options = 6;
*/
public Builder mergeGpuOptions(org.tensorflow.framework.GPUOptions value) {
if (gpuOptionsBuilder_ == null) {
if (gpuOptions_ != null) {
gpuOptions_ =
org.tensorflow.framework.GPUOptions.newBuilder(gpuOptions_).mergeFrom(value).buildPartial();
} else {
gpuOptions_ = value;
}
onChanged();
} else {
gpuOptionsBuilder_.mergeFrom(value);
}
return this;
}
/**
*
* Options that apply to all GPUs.
*
*
* .tensorflow.GPUOptions gpu_options = 6;
*/
public Builder clearGpuOptions() {
if (gpuOptionsBuilder_ == null) {
gpuOptions_ = null;
onChanged();
} else {
gpuOptions_ = null;
gpuOptionsBuilder_ = null;
}
return this;
}
/**
*
* Options that apply to all GPUs.
*
*
* .tensorflow.GPUOptions gpu_options = 6;
*/
public org.tensorflow.framework.GPUOptions.Builder getGpuOptionsBuilder() {
onChanged();
return getGpuOptionsFieldBuilder().getBuilder();
}
/**
*
* Options that apply to all GPUs.
*
*
* .tensorflow.GPUOptions gpu_options = 6;
*/
public org.tensorflow.framework.GPUOptionsOrBuilder getGpuOptionsOrBuilder() {
if (gpuOptionsBuilder_ != null) {
return gpuOptionsBuilder_.getMessageOrBuilder();
} else {
return gpuOptions_ == null ?
org.tensorflow.framework.GPUOptions.getDefaultInstance() : gpuOptions_;
}
}
/**
*
* Options that apply to all GPUs.
*
*
* .tensorflow.GPUOptions gpu_options = 6;
*/
private com.google.protobuf.SingleFieldBuilderV3<
org.tensorflow.framework.GPUOptions, org.tensorflow.framework.GPUOptions.Builder, org.tensorflow.framework.GPUOptionsOrBuilder>
getGpuOptionsFieldBuilder() {
if (gpuOptionsBuilder_ == null) {
gpuOptionsBuilder_ = new com.google.protobuf.SingleFieldBuilderV3<
org.tensorflow.framework.GPUOptions, org.tensorflow.framework.GPUOptions.Builder, org.tensorflow.framework.GPUOptionsOrBuilder>(
getGpuOptions(),
getParentForChildren(),
isClean());
gpuOptions_ = null;
}
return gpuOptionsBuilder_;
}
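// Usage sketch for gpu_options (assumption: GPUOptions.Builder exposes
// setAllowGrowth and setPerProcessGpuMemoryFraction for the corresponding
// GPUOptions fields; those accessors live in the generated GPUOptions class,
// not in this file).
//
//   org.tensorflow.framework.ConfigProto config =
//       org.tensorflow.framework.ConfigProto.newBuilder()
//           .setGpuOptions(
//               org.tensorflow.framework.GPUOptions.newBuilder()
//                   .setAllowGrowth(true)
//                   .setPerProcessGpuMemoryFraction(0.5))
//           .build();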
private boolean allowSoftPlacement_ ;
/**
*
* Whether soft placement is allowed. If allow_soft_placement is true,
* an op will be placed on CPU if
* 1. there's no GPU implementation for the OP
* or
* 2. no GPU devices are known or registered
* or
* 3. need to co-locate with reftype input(s) which are from CPU.
*
*
* bool allow_soft_placement = 7;
*/
public boolean getAllowSoftPlacement() {
return allowSoftPlacement_;
}
/**
*
* Whether soft placement is allowed. If allow_soft_placement is true,
* an op will be placed on CPU if
* 1. there's no GPU implementation for the OP
* or
* 2. no GPU devices are known or registered
* or
* 3. need to co-locate with reftype input(s) which are from CPU.
*
*
* bool allow_soft_placement = 7;
*/
public Builder setAllowSoftPlacement(boolean value) {
allowSoftPlacement_ = value;
onChanged();
return this;
}
/**
*
* Whether soft placement is allowed. If allow_soft_placement is true,
* an op will be placed on CPU if
* 1. there's no GPU implementation for the OP
* or
* 2. no GPU devices are known or registered
* or
* 3. need to co-locate with reftype input(s) which are from CPU.
*
*
* bool allow_soft_placement = 7;
*/
public Builder clearAllowSoftPlacement() {
allowSoftPlacement_ = false;
onChanged();
return this;
}
private boolean logDevicePlacement_ ;
/**
*
* Whether device placements should be logged.
*
*
* bool log_device_placement = 8;
*/
public boolean getLogDevicePlacement() {
return logDevicePlacement_;
}
/**
*
* Whether device placements should be logged.
*
*
* bool log_device_placement = 8;
*/
public Builder setLogDevicePlacement(boolean value) {
logDevicePlacement_ = value;
onChanged();
return this;
}
/**
*
* Whether device placements should be logged.
*
*
* bool log_device_placement = 8;
*/
public Builder clearLogDevicePlacement() {
logDevicePlacement_ = false;
onChanged();
return this;
}
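// Usage sketch (illustrative comment, not part of the generated code): the two placement
// flags are plain booleans, so configuring them is a pair of setter calls. With
// allow_soft_placement, an op that meets any of conditions 1-3 documented above falls
// back to CPU instead of failing; log_device_placement makes the runtime log the device
// chosen for each op.
//
//   org.tensorflow.framework.ConfigProto config =
//       org.tensorflow.framework.ConfigProto.newBuilder()
//           .setAllowSoftPlacement(true)
//           .setLogDevicePlacement(true)
//           .build();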
private org.tensorflow.framework.GraphOptions graphOptions_ = null;
private com.google.protobuf.SingleFieldBuilderV3<
org.tensorflow.framework.GraphOptions, org.tensorflow.framework.GraphOptions.Builder, org.tensorflow.framework.GraphOptionsOrBuilder> graphOptionsBuilder_;
/**
*
* Options that apply to all graphs.
*
*
* .tensorflow.GraphOptions graph_options = 10;
*/
public boolean hasGraphOptions() {
return graphOptionsBuilder_ != null || graphOptions_ != null;
}
/**
*
* Options that apply to all graphs.
*
*
* .tensorflow.GraphOptions graph_options = 10;
*/
public org.tensorflow.framework.GraphOptions getGraphOptions() {
if (graphOptionsBuilder_ == null) {
return graphOptions_ == null ? org.tensorflow.framework.GraphOptions.getDefaultInstance() : graphOptions_;
} else {
return graphOptionsBuilder_.getMessage();
}
}
/**
*
* Options that apply to all graphs.
*
*
* .tensorflow.GraphOptions graph_options = 10;
*/
public Builder setGraphOptions(org.tensorflow.framework.GraphOptions value) {
if (graphOptionsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
graphOptions_ = value;
onChanged();
} else {
graphOptionsBuilder_.setMessage(value);
}
return this;
}
/**
*
* Options that apply to all graphs.
*
*
* .tensorflow.GraphOptions graph_options = 10;
*/
public Builder setGraphOptions(
org.tensorflow.framework.GraphOptions.Builder builderForValue) {
if (graphOptionsBuilder_ == null) {
graphOptions_ = builderForValue.build();
onChanged();
} else {
graphOptionsBuilder_.setMessage(builderForValue.build());
}
return this;
}
/**
*
* Options that apply to all graphs.
*
*
* .tensorflow.GraphOptions graph_options = 10;
*/
public Builder mergeGraphOptions(org.tensorflow.framework.GraphOptions value) {
if (graphOptionsBuilder_ == null) {
if (graphOptions_ != null) {
graphOptions_ =
org.tensorflow.framework.GraphOptions.newBuilder(graphOptions_).mergeFrom(value).buildPartial();
} else {
graphOptions_ = value;
}
onChanged();
} else {
graphOptionsBuilder_.mergeFrom(value);
}
return this;
}
/**
*
* Options that apply to all graphs.
*
*
* .tensorflow.GraphOptions graph_options = 10;
*/
public Builder clearGraphOptions() {
if (graphOptionsBuilder_ == null) {
graphOptions_ = null;
onChanged();
} else {
graphOptions_ = null;
graphOptionsBuilder_ = null;
}
return this;
}
/**
*
* Options that apply to all graphs.
*
*
* .tensorflow.GraphOptions graph_options = 10;
*/
public org.tensorflow.framework.GraphOptions.Builder getGraphOptionsBuilder() {
onChanged();
return getGraphOptionsFieldBuilder().getBuilder();
}
/**
*
* Options that apply to all graphs.
*
*
* .tensorflow.GraphOptions graph_options = 10;
*/
public org.tensorflow.framework.GraphOptionsOrBuilder getGraphOptionsOrBuilder() {
if (graphOptionsBuilder_ != null) {
return graphOptionsBuilder_.getMessageOrBuilder();
} else {
return graphOptions_ == null ?
org.tensorflow.framework.GraphOptions.getDefaultInstance() : graphOptions_;
}
}
/**
*
* Options that apply to all graphs.
*
*
* .tensorflow.GraphOptions graph_options = 10;
*/
private com.google.protobuf.SingleFieldBuilderV3<
org.tensorflow.framework.GraphOptions, org.tensorflow.framework.GraphOptions.Builder, org.tensorflow.framework.GraphOptionsOrBuilder>
getGraphOptionsFieldBuilder() {
if (graphOptionsBuilder_ == null) {
graphOptionsBuilder_ = new com.google.protobuf.SingleFieldBuilderV3<
org.tensorflow.framework.GraphOptions, org.tensorflow.framework.GraphOptions.Builder, org.tensorflow.framework.GraphOptionsOrBuilder>(
getGraphOptions(),
getParentForChildren(),
isClean());
graphOptions_ = null;
}
return graphOptionsBuilder_;
}
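// Usage sketch (illustrative comment, not part of the generated code): graph_options can
// be populated either from a finished GraphOptions message or, as below, from a Builder
// via the overload that calls build() for you. setBuildCostModel is an assumed
// GraphOptions field and may differ between versions.
//
//   org.tensorflow.framework.ConfigProto config =
//       org.tensorflow.framework.ConfigProto.newBuilder()
//           .setGraphOptions(
//               org.tensorflow.framework.GraphOptions.newBuilder()
//                   .setBuildCostModel(1))   // assumed int64 field on GraphOptions
//           .build();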
private long operationTimeoutInMs_ ;
/**
*
* Global timeout for all blocking operations in this session. If non-zero,
* and not overridden on a per-operation basis, this value will be used as the
* deadline for all blocking operations.
*
*
* int64 operation_timeout_in_ms = 11;
*/
public long getOperationTimeoutInMs() {
return operationTimeoutInMs_;
}
/**
*
* Global timeout for all blocking operations in this session. If non-zero,
* and not overridden on a per-operation basis, this value will be used as the
* deadline for all blocking operations.
*
*
* int64 operation_timeout_in_ms = 11;
*/
public Builder setOperationTimeoutInMs(long value) {
operationTimeoutInMs_ = value;
onChanged();
return this;
}
/**
*
* Global timeout for all blocking operations in this session. If non-zero,
* and not overridden on a per-operation basis, this value will be used as the
* deadline for all blocking operations.
*
*
* int64 operation_timeout_in_ms = 11;
*/
public Builder clearOperationTimeoutInMs() {
operationTimeoutInMs_ = 0L;
onChanged();
return this;
}
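// Usage sketch (illustrative comment, not part of the generated code): the timeout is
// expressed in milliseconds, so a one-minute deadline for blocking calls is
// 60 * 1000 ms. Leaving the field at 0 (the proto3 default) means no global timeout
// is applied, per the field comment above.
//
//   org.tensorflow.framework.ConfigProto config =
//       org.tensorflow.framework.ConfigProto.newBuilder()
//           .setOperationTimeoutInMs(60_000L)   // 60 seconds
//           .build();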
private org.tensorflow.framework.RPCOptions rpcOptions_ = null;
private com.google.protobuf.SingleFieldBuilderV3<
org.tensorflow.framework.RPCOptions, org.tensorflow.framework.RPCOptions.Builder, org.tensorflow.framework.RPCOptionsOrBuilder> rpcOptionsBuilder_;
/**
*
* Options that apply when this session uses the distributed runtime.
*
*
* .tensorflow.RPCOptions rpc_options = 13;
*/
public boolean hasRpcOptions() {
return rpcOptionsBuilder_ != null || rpcOptions_ != null;
}
/**
*
* Options that apply when this session uses the distributed runtime.
*
*
* .tensorflow.RPCOptions rpc_options = 13;
*/
public org.tensorflow.framework.RPCOptions getRpcOptions() {
if (rpcOptionsBuilder_ == null) {
return rpcOptions_ == null ? org.tensorflow.framework.RPCOptions.getDefaultInstance() : rpcOptions_;
} else {
return rpcOptionsBuilder_.getMessage();
}
}
/**
*
* Options that apply when this session uses the distributed runtime.
*
*
* .tensorflow.RPCOptions rpc_options = 13;
*/
public Builder setRpcOptions(org.tensorflow.framework.RPCOptions value) {
if (rpcOptionsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
rpcOptions_ = value;
onChanged();
} else {
rpcOptionsBuilder_.setMessage(value);
}
return this;
}
/**
*
* Options that apply when this session uses the distributed runtime.
*
*
* .tensorflow.RPCOptions rpc_options = 13;
*/
public Builder setRpcOptions(
org.tensorflow.framework.RPCOptions.Builder builderForValue) {
if (rpcOptionsBuilder_ == null) {
rpcOptions_ = builderForValue.build();
onChanged();
} else {
rpcOptionsBuilder_.setMessage(builderForValue.build());
}
return this;
}
/**
*
* Options that apply when this session uses the distributed runtime.
*
*
* .tensorflow.RPCOptions rpc_options = 13;
*/
public Builder mergeRpcOptions(org.tensorflow.framework.RPCOptions value) {
if (rpcOptionsBuilder_ == null) {
if (rpcOptions_ != null) {
rpcOptions_ =
org.tensorflow.framework.RPCOptions.newBuilder(rpcOptions_).mergeFrom(value).buildPartial();
} else {
rpcOptions_ = value;
}
onChanged();
} else {
rpcOptionsBuilder_.mergeFrom(value);
}
return this;
}
/**
*
* Options that apply when this session uses the distributed runtime.
*
*
* .tensorflow.RPCOptions rpc_options = 13;
*/
public Builder clearRpcOptions() {
if (rpcOptionsBuilder_ == null) {
rpcOptions_ = null;
onChanged();
} else {
rpcOptions_ = null;
rpcOptionsBuilder_ = null;
}
return this;
}
/**
*
* Options that apply when this session uses the distributed runtime.
*
*
* .tensorflow.RPCOptions rpc_options = 13;
*/
public org.tensorflow.framework.RPCOptions.Builder getRpcOptionsBuilder() {
onChanged();
return getRpcOptionsFieldBuilder().getBuilder();
}
/**
*
* Options that apply when this session uses the distributed runtime.
*
*
* .tensorflow.RPCOptions rpc_options = 13;
*/
public org.tensorflow.framework.RPCOptionsOrBuilder getRpcOptionsOrBuilder() {
if (rpcOptionsBuilder_ != null) {
return rpcOptionsBuilder_.getMessageOrBuilder();
} else {
return rpcOptions_ == null ?
org.tensorflow.framework.RPCOptions.getDefaultInstance() : rpcOptions_;
}
}
/**
*
* Options that apply when this session uses the distributed runtime.
*
*
* .tensorflow.RPCOptions rpc_options = 13;
*/
private com.google.protobuf.SingleFieldBuilderV3<
org.tensorflow.framework.RPCOptions, org.tensorflow.framework.RPCOptions.Builder, org.tensorflow.framework.RPCOptionsOrBuilder>
getRpcOptionsFieldBuilder() {
if (rpcOptionsBuilder_ == null) {
rpcOptionsBuilder_ = new com.google.protobuf.SingleFieldBuilderV3<
org.tensorflow.framework.RPCOptions, org.tensorflow.framework.RPCOptions.Builder, org.tensorflow.framework.RPCOptionsOrBuilder>(
getRpcOptions(),
getParentForChildren(),
isClean());
rpcOptions_ = null;
}
return rpcOptionsBuilder_;
}
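// Usage sketch (illustrative comment, not part of the generated code): rpc_options only
// matters when the session uses the distributed runtime. The use_rpc_for_inprocess_master
// field is an assumption about the companion RPCOptions class and may differ between
// versions.
//
//   org.tensorflow.framework.ConfigProto config =
//       org.tensorflow.framework.ConfigProto.newBuilder()
//           .setRpcOptions(
//               org.tensorflow.framework.RPCOptions.newBuilder()
//                   .setUseRpcForInprocessMaster(true)   // assumed RPCOptions field
//                   .build())
//           .build();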
private org.tensorflow.distruntime.ClusterDef clusterDef_ = null;
private com.google.protobuf.SingleFieldBuilderV3<
org.tensorflow.distruntime.ClusterDef, org.tensorflow.distruntime.ClusterDef.Builder, org.tensorflow.distruntime.ClusterDefOrBuilder> clusterDefBuilder_;
/**
*
* Optional list of all workers to use in this session.
*
*
* .tensorflow.ClusterDef cluster_def = 14;
*/
public boolean hasClusterDef() {
return clusterDefBuilder_ != null || clusterDef_ != null;
}
/**
*
* Optional list of all workers to use in this session.
*
*
* .tensorflow.ClusterDef cluster_def = 14;
*/
public org.tensorflow.distruntime.ClusterDef getClusterDef() {
if (clusterDefBuilder_ == null) {
return clusterDef_ == null ? org.tensorflow.distruntime.ClusterDef.getDefaultInstance() : clusterDef_;
} else {
return clusterDefBuilder_.getMessage();
}
}
/**
*
* Optional list of all workers to use in this session.
*
*
* .tensorflow.ClusterDef cluster_def = 14;
*/
public Builder setClusterDef(org.tensorflow.distruntime.ClusterDef value) {
if (clusterDefBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
clusterDef_ = value;
onChanged();
} else {
clusterDefBuilder_.setMessage(value);
}
return this;
}
/**
*
* Optional list of all workers to use in this session.
*
*
* .tensorflow.ClusterDef cluster_def = 14;
*/
public Builder setClusterDef(
org.tensorflow.distruntime.ClusterDef.Builder builderForValue) {
if (clusterDefBuilder_ == null) {
clusterDef_ = builderForValue.build();
onChanged();
} else {
clusterDefBuilder_.setMessage(builderForValue.build());
}
return this;
}
/**
*
* Optional list of all workers to use in this session.
*
*
* .tensorflow.ClusterDef cluster_def = 14;
*/
public Builder mergeClusterDef(org.tensorflow.distruntime.ClusterDef value) {
if (clusterDefBuilder_ == null) {
if (clusterDef_ != null) {
clusterDef_ =
org.tensorflow.distruntime.ClusterDef.newBuilder(clusterDef_).mergeFrom(value).buildPartial();
} else {
clusterDef_ = value;
}
onChanged();
} else {
clusterDefBuilder_.mergeFrom(value);
}
return this;
}
/**
*
* Optional list of all workers to use in this session.
*
*
* .tensorflow.ClusterDef cluster_def = 14;
*/
public Builder clearClusterDef() {
if (clusterDefBuilder_ == null) {
clusterDef_ = null;
onChanged();
} else {
clusterDef_ = null;
clusterDefBuilder_ = null;
}
return this;
}
/**
*
* Optional list of all workers to use in this session.
*
*
* .tensorflow.ClusterDef cluster_def = 14;
*/
public org.tensorflow.distruntime.ClusterDef.Builder getClusterDefBuilder() {
onChanged();
return getClusterDefFieldBuilder().getBuilder();
}
/**
*
* Optional list of all workers to use in this session.
*
*
* .tensorflow.ClusterDef cluster_def = 14;
*/
public org.tensorflow.distruntime.ClusterDefOrBuilder getClusterDefOrBuilder() {
if (clusterDefBuilder_ != null) {
return clusterDefBuilder_.getMessageOrBuilder();
} else {
return clusterDef_ == null ?
org.tensorflow.distruntime.ClusterDef.getDefaultInstance() : clusterDef_;
}
}
/**
*
* Optional list of all workers to use in this session.
*
*
* .tensorflow.ClusterDef cluster_def = 14;
*/
private com.google.protobuf.SingleFieldBuilderV3<
org.tensorflow.distruntime.ClusterDef, org.tensorflow.distruntime.ClusterDef.Builder, org.tensorflow.distruntime.ClusterDefOrBuilder>
getClusterDefFieldBuilder() {
if (clusterDefBuilder_ == null) {
clusterDefBuilder_ = new com.google.protobuf.SingleFieldBuilderV3<
org.tensorflow.distruntime.ClusterDef, org.tensorflow.distruntime.ClusterDef.Builder, org.tensorflow.distruntime.ClusterDefOrBuilder>(
getClusterDef(),
getParentForChildren(),
isClean());
clusterDef_ = null;
}
return clusterDefBuilder_;
}
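// Usage sketch (illustrative comment, not part of the generated code): cluster_def lists
// the workers this session may use. JobDef and its setName/putTasks accessors are
// assumptions about the companion generated classes from cluster.proto.
//
//   org.tensorflow.distruntime.ClusterDef cluster =
//       org.tensorflow.distruntime.ClusterDef.newBuilder()
//           .addJob(org.tensorflow.distruntime.JobDef.newBuilder()
//               .setName("worker")                 // assumed JobDef field
//               .putTasks(0, "localhost:2222"))    // assumed JobDef map field
//           .build();
//   org.tensorflow.framework.ConfigProto config =
//       org.tensorflow.framework.ConfigProto.newBuilder()
//           .setClusterDef(cluster)
//           .build();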
private boolean isolateSessionState_ ;
/**
*
* If true, any resources such as Variables used in the session will not be
* shared with other sessions.
*
*
* bool isolate_session_state = 15;
*/
public boolean getIsolateSessionState() {
return isolateSessionState_;
}
/**
*
* If true, any resources such as Variables used in the session will not be
* shared with other sessions.
*
*
* bool isolate_session_state = 15;
*/
public Builder setIsolateSessionState(boolean value) {
isolateSessionState_ = value;
onChanged();
return this;
}
/**
*
* If true, any resources such as Variables used in the session will not be
* shared with other sessions.
*
*
* bool isolate_session_state = 15;
*/
public Builder clearIsolateSessionState() {
isolateSessionState_ = false;
onChanged();
return this;
}
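// Usage sketch (illustrative comment, not part of the generated code): with
// isolate_session_state set, resources such as Variables created by this session are
// not shared with other sessions, per the field comment above.
//
//   org.tensorflow.framework.ConfigProto config =
//       org.tensorflow.framework.ConfigProto.newBuilder()
//           .setIsolateSessionState(true)
//           .build();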
private org.tensorflow.framework.ConfigProto.Experimental experimental_ = null;
private com.google.protobuf.SingleFieldBuilderV3<
org.tensorflow.framework.ConfigProto.Experimental, org.tensorflow.framework.ConfigProto.Experimental.Builder, org.tensorflow.framework.ConfigProto.ExperimentalOrBuilder> experimentalBuilder_;
/**
* .tensorflow.ConfigProto.Experimental experimental = 16;
*/
public boolean hasExperimental() {
return experimentalBuilder_ != null || experimental_ != null;
}
/**
* .tensorflow.ConfigProto.Experimental experimental = 16;
*/
public org.tensorflow.framework.ConfigProto.Experimental getExperimental() {
if (experimentalBuilder_ == null) {
return experimental_ == null ? org.tensorflow.framework.ConfigProto.Experimental.getDefaultInstance() : experimental_;
} else {
return experimentalBuilder_.getMessage();
}
}
/**
* .tensorflow.ConfigProto.Experimental experimental = 16;
*/
public Builder setExperimental(org.tensorflow.framework.ConfigProto.Experimental value) {
if (experimentalBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
experimental_ = value;
onChanged();
} else {
experimentalBuilder_.setMessage(value);
}
return this;
}
/**
* .tensorflow.ConfigProto.Experimental experimental = 16;
*/
public Builder setExperimental(
org.tensorflow.framework.ConfigProto.Experimental.Builder builderForValue) {
if (experimentalBuilder_ == null) {
experimental_ = builderForValue.build();
onChanged();
} else {
experimentalBuilder_.setMessage(builderForValue.build());
}
return this;
}
/**
* .tensorflow.ConfigProto.Experimental experimental = 16;
*/
public Builder mergeExperimental(org.tensorflow.framework.ConfigProto.Experimental value) {
if (experimentalBuilder_ == null) {
if (experimental_ != null) {
experimental_ =
org.tensorflow.framework.ConfigProto.Experimental.newBuilder(experimental_).mergeFrom(value).buildPartial();
} else {
experimental_ = value;
}
onChanged();
} else {
experimentalBuilder_.mergeFrom(value);
}
return this;
}
/**
* .tensorflow.ConfigProto.Experimental experimental = 16;
*/
public Builder clearExperimental() {
if (experimentalBuilder_ == null) {
experimental_ = null;
onChanged();
} else {
experimental_ = null;
experimentalBuilder_ = null;
}
return this;
}
/**
* .tensorflow.ConfigProto.Experimental experimental = 16;
*/
public org.tensorflow.framework.ConfigProto.Experimental.Builder getExperimentalBuilder() {
onChanged();
return getExperimentalFieldBuilder().getBuilder();
}
/**
* .tensorflow.ConfigProto.Experimental experimental = 16;
*/
public org.tensorflow.framework.ConfigProto.ExperimentalOrBuilder getExperimentalOrBuilder() {
if (experimentalBuilder_ != null) {
return experimentalBuilder_.getMessageOrBuilder();
} else {
return experimental_ == null ?
org.tensorflow.framework.ConfigProto.Experimental.getDefaultInstance() : experimental_;
}
}
/**
* .tensorflow.ConfigProto.Experimental experimental = 16;
*/
private com.google.protobuf.SingleFieldBuilderV3<
org.tensorflow.framework.ConfigProto.Experimental, org.tensorflow.framework.ConfigProto.Experimental.Builder, org.tensorflow.framework.ConfigProto.ExperimentalOrBuilder>
getExperimentalFieldBuilder() {
if (experimentalBuilder_ == null) {
experimentalBuilder_ = new com.google.protobuf.SingleFieldBuilderV3<
org.tensorflow.framework.ConfigProto.Experimental, org.tensorflow.framework.ConfigProto.Experimental.Builder, org.tensorflow.framework.ConfigProto.ExperimentalOrBuilder>(
getExperimental(),
getParentForChildren(),
isClean());
experimental_ = null;
}
return experimentalBuilder_;
}
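// Usage sketch (illustrative comment, not part of the generated code): the experimental
// sub-message follows the same presence pattern as the other singular message fields, so
// callers should check hasExperimental() before relying on its contents;
// getExperimental() returns the default instance when the field is unset.
//
//   org.tensorflow.framework.ConfigProto config =
//       org.tensorflow.framework.ConfigProto.getDefaultInstance();
//   if (config.hasExperimental()) {               // false for the default instance
//     org.tensorflow.framework.ConfigProto.Experimental exp = config.getExperimental();
//   }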
@java.lang.Override
public final Builder setUnknownFields(
final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFieldsProto3(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:tensorflow.ConfigProto)
}
// @@protoc_insertion_point(class_scope:tensorflow.ConfigProto)
private static final org.tensorflow.framework.ConfigProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.tensorflow.framework.ConfigProto();
}
public static org.tensorflow.framework.ConfigProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
private static final com.google.protobuf.Parser<ConfigProto>
PARSER = new com.google.protobuf.AbstractParser<ConfigProto>() {
@java.lang.Override
public ConfigProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new ConfigProto(input, extensionRegistry);
}
};
public static com.google.protobuf.Parser<ConfigProto> parser() {
return PARSER;
}
@java.lang.Override
public com.google.protobuf.Parser<ConfigProto> getParserForType() {
return PARSER;
}
@java.lang.Override
public org.tensorflow.framework.ConfigProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
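// Usage sketch (illustrative comment, not part of the generated code): PARSER above backs
// the standard generated entry points, so a ConfigProto can be round-tripped through its
// wire format with toByteArray() and parseFrom(). Note that parseFrom throws
// InvalidProtocolBufferException on malformed input.
//
//   org.tensorflow.framework.ConfigProto original =
//       org.tensorflow.framework.ConfigProto.newBuilder()
//           .setAllowSoftPlacement(true)
//           .build();
//   byte[] wire = original.toByteArray();
//   org.tensorflow.framework.ConfigProto parsed =
//       org.tensorflow.framework.ConfigProto.parseFrom(wire);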
}