// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: onnx.proto
// Protobuf Java Version: 3.25.3
package onnx;
public final class Onnx {
private Onnx() {}
public static void registerAllExtensions(
com.google.protobuf.ExtensionRegistryLite registry) {
// onnx.proto declares no protobuf extensions, so there is nothing to register.
}
public static void registerAllExtensions(
com.google.protobuf.ExtensionRegistry registry) {
// Delegates to the lite-registry overload; ExtensionRegistry extends
// ExtensionRegistryLite, and no extensions exist for this proto anyway.
registerAllExtensions(
(com.google.protobuf.ExtensionRegistryLite) registry);
}
/**
 * Versioning
 *
 * ONNX versioning is specified in docs/IR.md and elaborated on in docs/Versioning.md
 *
 * To be compatible with both proto2 and proto3, we will use a version number
 * that is not defined by the default value but an explicit enum number.
 *
 * Protobuf enum {@code onnx.Version}
 */
public enum Version
    implements com.google.protobuf.ProtocolMessageEnum {
  /**
   * proto3 requires the first enum value to be zero.
   * We add this just to appease the compiler.
   *
   * {@code _START_VERSION = 0;}
   */
  _START_VERSION(0),
  /**
   * The version field is always serialized and we will use it to store the
   * version that the graph is generated from. This helps us set up version
   * control. This was the version we published on Oct 10, 2017.
   *
   * {@code IR_VERSION_2017_10_10 = 1;}
   */
  IR_VERSION_2017_10_10(1),
  /**
   * IR VERSION 2 published on Oct 30, 2017
   * - Added type discriminator to AttributeProto to support proto3 users
   *
   * {@code IR_VERSION_2017_10_30 = 2;}
   */
  IR_VERSION_2017_10_30(2),
  /**
   * IR VERSION 3 published on Nov 3, 2017
   * - For operator versioning: added new message OperatorSetIdProto
   *   and opset_import in ModelProto
   * - For vendor extensions, added domain in NodeProto
   *
   * {@code IR_VERSION_2017_11_3 = 3;}
   */
  IR_VERSION_2017_11_3(3),
  /**
   * IR VERSION 4 published on Jan 22, 2019
   * - Relax constraint that initializers should be a subset of graph inputs
   * - Add type BFLOAT16
   *
   * {@code IR_VERSION_2019_1_22 = 4;}
   */
  IR_VERSION_2019_1_22(4),
  /**
   * IR VERSION 5 published on March 18, 2019
   * - Add message TensorAnnotation.
   * - Add quantization annotation in GraphProto to map tensor with its scale
   *   and zero point quantization parameters.
   *
   * {@code IR_VERSION_2019_3_18 = 5;}
   */
  IR_VERSION_2019_3_18(5),
  /**
   * IR VERSION 6 published on Sep 19, 2019
   * - Add support for sparse tensor constants stored in model
   *   (message SparseTensorProto, sparse initializers).
   *
   * {@code IR_VERSION_2019_9_19 = 6;}
   */
  IR_VERSION_2019_9_19(6),
  /**
   * IR VERSION 7 published on May 8, 2020
   * - Allow function body graphs to rely on multiple external operator sets.
   * - Promote inference-graph initializers to global mutable variables.
   * - Add message TrainingInfoProto for initialization and training algorithms.
   *
   * {@code IR_VERSION_2020_5_8 = 7;}
   */
  IR_VERSION_2020_5_8(7),
  /**
   * IR VERSION 8 published on July 30, 2021
   * - Introduce TypeProto.SparseTensor and TypeProto.Optional.
   * - Added a list of FunctionProtos local to the model.
   * - Deprecated since_version and operator status from FunctionProto.
   *
   * {@code IR_VERSION_2021_7_30 = 8;}
   */
  IR_VERSION_2021_7_30(8),
  /**
   * IR VERSION 9 published on May 5, 2023
   * - Added AttributeProto to FunctionProto so that default attribute values can be set.
   * - Added FLOAT8E4M3FN, FLOAT8E4M3FNUZ, FLOAT8E5M2, FLOAT8E5M2FNUZ.
   *
   * {@code IR_VERSION = 9;}
   */
  IR_VERSION(9),
  ;

  /** {@code _START_VERSION = 0;} */
  public static final int _START_VERSION_VALUE = 0;
  /** {@code IR_VERSION_2017_10_10 = 1;} */
  public static final int IR_VERSION_2017_10_10_VALUE = 1;
  /** {@code IR_VERSION_2017_10_30 = 2;} */
  public static final int IR_VERSION_2017_10_30_VALUE = 2;
  /** {@code IR_VERSION_2017_11_3 = 3;} */
  public static final int IR_VERSION_2017_11_3_VALUE = 3;
  /** {@code IR_VERSION_2019_1_22 = 4;} */
  public static final int IR_VERSION_2019_1_22_VALUE = 4;
  /** {@code IR_VERSION_2019_3_18 = 5;} */
  public static final int IR_VERSION_2019_3_18_VALUE = 5;
  /** {@code IR_VERSION_2019_9_19 = 6;} */
  public static final int IR_VERSION_2019_9_19_VALUE = 6;
  /** {@code IR_VERSION_2020_5_8 = 7;} */
  public static final int IR_VERSION_2020_5_8_VALUE = 7;
  /** {@code IR_VERSION_2021_7_30 = 8;} */
  public static final int IR_VERSION_2021_7_30_VALUE = 8;
  /** {@code IR_VERSION = 9;} */
  public static final int IR_VERSION_VALUE = 9;

  /** @return the numeric wire value of this enum entry. */
  public final int getNumber() {
    return value;
  }

  /**
   * @param value The numeric wire value of the corresponding enum entry.
   * @return The enum associated with the given numeric wire value.
   * @deprecated Use {@link #forNumber(int)} instead.
   */
  @java.lang.Deprecated
  public static Version valueOf(int value) {
    return forNumber(value);
  }

  /**
   * @param value The numeric wire value of the corresponding enum entry.
   * @return The enum associated with the given numeric wire value, or
   *         {@code null} if no entry has that number.
   */
  public static Version forNumber(int value) {
    switch (value) {
      case 0: return _START_VERSION;
      case 1: return IR_VERSION_2017_10_10;
      case 2: return IR_VERSION_2017_10_30;
      case 3: return IR_VERSION_2017_11_3;
      case 4: return IR_VERSION_2019_1_22;
      case 5: return IR_VERSION_2019_3_18;
      case 6: return IR_VERSION_2019_9_19;
      case 7: return IR_VERSION_2020_5_8;
      case 8: return IR_VERSION_2021_7_30;
      case 9: return IR_VERSION;
      default: return null;
    }
  }

  public static com.google.protobuf.Internal.EnumLiteMap<Version>
      internalGetValueMap() {
    return internalValueMap;
  }
  // Number-to-value map consulted by the protobuf runtime when parsing.
  private static final com.google.protobuf.Internal.EnumLiteMap<
      Version> internalValueMap =
          new com.google.protobuf.Internal.EnumLiteMap<Version>() {
            public Version findValueByNumber(int number) {
              return Version.forNumber(number);
            }
          };

  public final com.google.protobuf.Descriptors.EnumValueDescriptor
      getValueDescriptor() {
    return getDescriptor().getValues().get(ordinal());
  }
  public final com.google.protobuf.Descriptors.EnumDescriptor
      getDescriptorForType() {
    return getDescriptor();
  }
  public static final com.google.protobuf.Descriptors.EnumDescriptor
      getDescriptor() {
    // onnx.Version is the first top-level enum declared in onnx.proto.
    return onnx.Onnx.getDescriptor().getEnumTypes().get(0);
  }

  private static final Version[] VALUES = values();

  /**
   * @param desc descriptor of the value to look up; must belong to this enum type.
   * @return The enum value matching the given descriptor.
   * @throws java.lang.IllegalArgumentException if the descriptor is for another type.
   */
  public static Version valueOf(
      com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
    if (desc.getType() != getDescriptor()) {
      throw new java.lang.IllegalArgumentException(
          "EnumValueDescriptor is not for this type.");
    }
    return VALUES[desc.getIndex()];
  }

  // The proto wire number of this enum value.
  private final int value;

  private Version(int value) {
    this.value = value;
  }
  // @@protoc_insertion_point(enum_scope:onnx.Version)
}
/**
 * Operator/function status.
 *
 * Protobuf enum {@code onnx.OperatorStatus}
 */
public enum OperatorStatus
    implements com.google.protobuf.ProtocolMessageEnum {
  /** {@code EXPERIMENTAL = 0;} */
  EXPERIMENTAL(0),
  /** {@code STABLE = 1;} */
  STABLE(1),
  ;

  /** {@code EXPERIMENTAL = 0;} */
  public static final int EXPERIMENTAL_VALUE = 0;
  /** {@code STABLE = 1;} */
  public static final int STABLE_VALUE = 1;

  /** @return the numeric wire value of this enum entry. */
  public final int getNumber() {
    return value;
  }

  /**
   * @param value The numeric wire value of the corresponding enum entry.
   * @return The enum associated with the given numeric wire value.
   * @deprecated Use {@link #forNumber(int)} instead.
   */
  @java.lang.Deprecated
  public static OperatorStatus valueOf(int value) {
    return forNumber(value);
  }

  /**
   * @param value The numeric wire value of the corresponding enum entry.
   * @return The enum associated with the given numeric wire value, or
   *         {@code null} if no entry has that number.
   */
  public static OperatorStatus forNumber(int value) {
    switch (value) {
      case 0: return EXPERIMENTAL;
      case 1: return STABLE;
      default: return null;
    }
  }

  public static com.google.protobuf.Internal.EnumLiteMap<OperatorStatus>
      internalGetValueMap() {
    return internalValueMap;
  }
  // Number-to-value map consulted by the protobuf runtime when parsing.
  private static final com.google.protobuf.Internal.EnumLiteMap<
      OperatorStatus> internalValueMap =
          new com.google.protobuf.Internal.EnumLiteMap<OperatorStatus>() {
            public OperatorStatus findValueByNumber(int number) {
              return OperatorStatus.forNumber(number);
            }
          };

  public final com.google.protobuf.Descriptors.EnumValueDescriptor
      getValueDescriptor() {
    return getDescriptor().getValues().get(ordinal());
  }
  public final com.google.protobuf.Descriptors.EnumDescriptor
      getDescriptorForType() {
    return getDescriptor();
  }
  public static final com.google.protobuf.Descriptors.EnumDescriptor
      getDescriptor() {
    // onnx.OperatorStatus is the second top-level enum declared in onnx.proto.
    return onnx.Onnx.getDescriptor().getEnumTypes().get(1);
  }

  private static final OperatorStatus[] VALUES = values();

  /**
   * @param desc descriptor of the value to look up; must belong to this enum type.
   * @return The enum value matching the given descriptor.
   * @throws java.lang.IllegalArgumentException if the descriptor is for another type.
   */
  public static OperatorStatus valueOf(
      com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
    if (desc.getType() != getDescriptor()) {
      throw new java.lang.IllegalArgumentException(
          "EnumValueDescriptor is not for this type.");
    }
    return VALUES[desc.getIndex()];
  }

  // The proto wire number of this enum value.
  private final int value;

  private OperatorStatus(int value) {
    this.value = value;
  }
  // @@protoc_insertion_point(enum_scope:onnx.OperatorStatus)
}
public interface AttributeProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:onnx.AttributeProto)
com.google.protobuf.MessageOrBuilder {
/**
*
* The name field MUST be present for this version of the IR.
*
*
* optional string name = 1;
* @return Whether the name field is set.
*/
boolean hasName();
/**
*
* The name field MUST be present for this version of the IR.
*
*
* optional string name = 1;
* @return The name.
*/
java.lang.String getName();
/**
*
* The name field MUST be present for this version of the IR.
*
*
* optional string name = 1;
* @return The bytes for name.
*/
com.google.protobuf.ByteString
getNameBytes();
/**
*
* if ref_attr_name is not empty, ref_attr_name is the attribute name in parent function.
* In this case, this AttributeProto does not contain data, and it's a reference of attribute
* in parent scope.
* NOTE: This should ONLY be used in function (sub-graph). It's invalid to be used in main graph.
*
*
* optional string ref_attr_name = 21;
* @return Whether the refAttrName field is set.
*/
boolean hasRefAttrName();
/**
 * if ref_attr_name is not empty, ref_attr_name is the attribute name in parent function.
 * In this case, this AttributeProto does not contain data, and it's a reference of attribute
 * in parent scope.
 * NOTE: This should ONLY be used in function (sub-graph). It's invalid to be used in main graph.
 *
 * optional string ref_attr_name = 21;
 * @return The bytes for refAttrName.
 */
com.google.protobuf.ByteString
getRefAttrNameBytes();
/**
*
* A human-readable documentation for this attribute. Markdown is allowed.
*
*
* optional string doc_string = 13;
* @return Whether the docString field is set.
*/
boolean hasDocString();
/**
 * A human-readable documentation for this attribute. Markdown is allowed.
 *
 * optional string doc_string = 13;
 * @return The bytes for docString.
 */
com.google.protobuf.ByteString
getDocStringBytes();
/**
*
* The type field MUST be present for this version of the IR.
* For 0.0.1 versions of the IR, this field was not defined, and
* implementations needed to use has_field heuristics to determine
* which value field was in use. For IR_VERSION 0.0.2 or later, this
* field MUST be set and match the f|i|s|t|... field in use. This
* change was made to accommodate proto3 implementations.
*
*
* optional .onnx.AttributeProto.AttributeType type = 20;
* @return Whether the type field is set.
*/
boolean hasType();
/**
*
* The type field MUST be present for this version of the IR.
* For 0.0.1 versions of the IR, this field was not defined, and
* implementations needed to use has_field heuristics to determine
* which value field was in use. For IR_VERSION 0.0.2 or later, this
* field MUST be set and match the f|i|s|t|... field in use. This
* change was made to accommodate proto3 implementations.
*
*
* optional .onnx.AttributeProto.AttributeType type = 20;
* @return The type.
*/
onnx.Onnx.AttributeProto.AttributeType getType();
/**
*
* Exactly ONE of the following fields must be present for this version of the IR
*
*
* optional float f = 2;
* @return Whether the f field is set.
*/
boolean hasF();
/**
*
* Exactly ONE of the following fields must be present for this version of the IR
*
*
* optional float f = 2;
* @return The f.
*/
float getF();
/**
*
* int
*
*
* optional int64 i = 3;
* @return Whether the i field is set.
*/
boolean hasI();
/**
*
* int
*
*
* optional int64 i = 3;
* @return The i.
*/
long getI();
/**
*
* UTF-8 string
*
*
* optional bytes s = 4;
* @return Whether the s field is set.
*/
boolean hasS();
/**
*
* UTF-8 string
*
*
* optional bytes s = 4;
* @return The s.
*/
com.google.protobuf.ByteString getS();
/**
*
* tensor value
*
*
* optional .onnx.TensorProto t = 5;
* @return Whether the t field is set.
*/
boolean hasT();
/**
*
* tensor value
*
*
* optional .onnx.TensorProto t = 5;
* @return The t.
*/
onnx.Onnx.TensorProto getT();
/**
 * list of floats
 *
 * repeated float floats = 7;
 * @return A list containing the floats.
 */
java.util.List<java.lang.Float> getFloatsList();
/**
*
* list of floats
*
*
* repeated float floats = 7;
* @return The count of floats.
*/
int getFloatsCount();
/**
*
* list of floats
*
*
* repeated float floats = 7;
* @param index The index of the element to return.
* @return The floats at the given index.
*/
float getFloats(int index);
/**
 * list of ints
 *
 * repeated int64 ints = 8;
 * @return A list containing the ints.
 */
java.util.List<java.lang.Long> getIntsList();
/**
*
* list of ints
*
*
* repeated int64 ints = 8;
* @return The count of ints.
*/
int getIntsCount();
/**
*
* list of ints
*
*
* repeated int64 ints = 8;
* @param index The index of the element to return.
* @return The ints at the given index.
*/
long getInts(int index);
/**
 * list of UTF-8 strings
 *
 * repeated bytes strings = 9;
 * @return A list containing the strings.
 */
java.util.List<com.google.protobuf.ByteString> getStringsList();
/**
*
* list of UTF-8 strings
*
*
* repeated bytes strings = 9;
* @return The count of strings.
*/
int getStringsCount();
/**
*
* list of UTF-8 strings
*
*
* repeated bytes strings = 9;
* @param index The index of the element to return.
* @return The strings at the given index.
*/
com.google.protobuf.ByteString getStrings(int index);
/**
*
* Attributes
*
* A named attribute containing either singular float, integer, string, graph,
* and tensor values, or repeated float, integer, string, graph, and tensor values.
* An AttributeProto MUST contain the name field, and *only one* of the
* following content fields, effectively enforcing a C/C++ union equivalent.
*
* Note: this enum is structurally identical to the OpSchema::AttrType
* enum defined in schema.h. If you rev one, you likely need to rev the other.
*
*
* Protobuf enum {@code onnx.AttributeProto.AttributeType}
*/
public enum AttributeType
    implements com.google.protobuf.ProtocolMessageEnum {
  /** {@code UNDEFINED = 0;} */
  UNDEFINED(0),
  /** {@code FLOAT = 1;} */
  FLOAT(1),
  /** {@code INT = 2;} */
  INT(2),
  /** {@code STRING = 3;} */
  STRING(3),
  /** {@code TENSOR = 4;} */
  TENSOR(4),
  /** {@code GRAPH = 5;} */
  GRAPH(5),
  /** {@code SPARSE_TENSOR = 11;} */
  SPARSE_TENSOR(11),
  /** {@code TYPE_PROTO = 13;} */
  TYPE_PROTO(13),
  /** {@code FLOATS = 6;} */
  FLOATS(6),
  /** {@code INTS = 7;} */
  INTS(7),
  /** {@code STRINGS = 8;} */
  STRINGS(8),
  /** {@code TENSORS = 9;} */
  TENSORS(9),
  /** {@code GRAPHS = 10;} */
  GRAPHS(10),
  /** {@code SPARSE_TENSORS = 12;} */
  SPARSE_TENSORS(12),
  /** {@code TYPE_PROTOS = 14;} */
  TYPE_PROTOS(14),
  ;

  /** {@code UNDEFINED = 0;} */
  public static final int UNDEFINED_VALUE = 0;
  /** {@code FLOAT = 1;} */
  public static final int FLOAT_VALUE = 1;
  /** {@code INT = 2;} */
  public static final int INT_VALUE = 2;
  /** {@code STRING = 3;} */
  public static final int STRING_VALUE = 3;
  /** {@code TENSOR = 4;} */
  public static final int TENSOR_VALUE = 4;
  /** {@code GRAPH = 5;} */
  public static final int GRAPH_VALUE = 5;
  /** {@code SPARSE_TENSOR = 11;} */
  public static final int SPARSE_TENSOR_VALUE = 11;
  /** {@code TYPE_PROTO = 13;} */
  public static final int TYPE_PROTO_VALUE = 13;
  /** {@code FLOATS = 6;} */
  public static final int FLOATS_VALUE = 6;
  /** {@code INTS = 7;} */
  public static final int INTS_VALUE = 7;
  /** {@code STRINGS = 8;} */
  public static final int STRINGS_VALUE = 8;
  /** {@code TENSORS = 9;} */
  public static final int TENSORS_VALUE = 9;
  /** {@code GRAPHS = 10;} */
  public static final int GRAPHS_VALUE = 10;
  /** {@code SPARSE_TENSORS = 12;} */
  public static final int SPARSE_TENSORS_VALUE = 12;
  /** {@code TYPE_PROTOS = 14;} */
  public static final int TYPE_PROTOS_VALUE = 14;

  /** @return the numeric wire value of this enum entry. */
  public final int getNumber() {
    return value;
  }

  /**
   * @param value The numeric wire value of the corresponding enum entry.
   * @return The enum associated with the given numeric wire value.
   * @deprecated Use {@link #forNumber(int)} instead.
   */
  @java.lang.Deprecated
  public static AttributeType valueOf(int value) {
    return forNumber(value);
  }

  /**
   * @param value The numeric wire value of the corresponding enum entry.
   * @return The enum associated with the given numeric wire value, or
   *         {@code null} if no entry has that number.
   */
  public static AttributeType forNumber(int value) {
    switch (value) {
      case 0: return UNDEFINED;
      case 1: return FLOAT;
      case 2: return INT;
      case 3: return STRING;
      case 4: return TENSOR;
      case 5: return GRAPH;
      case 11: return SPARSE_TENSOR;
      case 13: return TYPE_PROTO;
      case 6: return FLOATS;
      case 7: return INTS;
      case 8: return STRINGS;
      case 9: return TENSORS;
      case 10: return GRAPHS;
      case 12: return SPARSE_TENSORS;
      case 14: return TYPE_PROTOS;
      default: return null;
    }
  }

  public static com.google.protobuf.Internal.EnumLiteMap<AttributeType>
      internalGetValueMap() {
    return internalValueMap;
  }
  // Number-to-value map consulted by the protobuf runtime when parsing.
  private static final com.google.protobuf.Internal.EnumLiteMap<
      AttributeType> internalValueMap =
          new com.google.protobuf.Internal.EnumLiteMap<AttributeType>() {
            public AttributeType findValueByNumber(int number) {
              return AttributeType.forNumber(number);
            }
          };

  public final com.google.protobuf.Descriptors.EnumValueDescriptor
      getValueDescriptor() {
    return getDescriptor().getValues().get(ordinal());
  }
  public final com.google.protobuf.Descriptors.EnumDescriptor
      getDescriptorForType() {
    return getDescriptor();
  }
  public static final com.google.protobuf.Descriptors.EnumDescriptor
      getDescriptor() {
    // AttributeType is the first enum nested inside onnx.AttributeProto.
    return onnx.Onnx.AttributeProto.getDescriptor().getEnumTypes().get(0);
  }

  private static final AttributeType[] VALUES = values();

  /**
   * @param desc descriptor of the value to look up; must belong to this enum type.
   * @return The enum value matching the given descriptor.
   * @throws java.lang.IllegalArgumentException if the descriptor is for another type.
   */
  public static AttributeType valueOf(
      com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
    if (desc.getType() != getDescriptor()) {
      throw new java.lang.IllegalArgumentException(
          "EnumValueDescriptor is not for this type.");
    }
    return VALUES[desc.getIndex()];
  }

  // The proto wire number of this enum value.
  private final int value;

  private AttributeType(int value) {
    this.value = value;
  }
  // @@protoc_insertion_point(enum_scope:onnx.AttributeProto.AttributeType)
}
// Presence bits for this message's optional fields, per the hasXxx() methods below:
// bit 0 = name, bit 1 = ref_attr_name, bit 2 = doc_string, bit 3 = type,
// bit 4 = f, bit 5 = i, bit 6 = s, bit 7 = t, bit 8 = g, bit 9 = sparse_tensor.
private int bitField0_;
public static final int NAME_FIELD_NUMBER = 1;
// Holds either a java.lang.String or a ByteString; decoded lazily by getName()
// and re-encoded lazily by getNameBytes().
@SuppressWarnings("serial")
private volatile java.lang.Object name_ = "";
/**
 * The name field MUST be present for this version of the IR.
 *
 * optional string name = 1;
 * @return Whether the name field is set.
 */
@java.lang.Override
public boolean hasName() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
 * The name field MUST be present for this version of the IR.
 *
 * optional string name = 1;
 * @return The name.
 */
@java.lang.Override
public java.lang.String getName() {
java.lang.Object ref = name_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
// Cache the decoded String only when the bytes are valid UTF-8, so future
// calls skip re-decoding; invalid UTF-8 keeps the raw ByteString cached.
if (bs.isValidUtf8()) {
name_ = s;
}
return s;
}
}
/**
 * The name field MUST be present for this version of the IR.
 *
 * optional string name = 1;
 * @return The bytes for name.
 */
@java.lang.Override
public com.google.protobuf.ByteString
getNameBytes() {
java.lang.Object ref = name_;
if (ref instanceof java.lang.String) {
// Encode and cache the ByteString form so future calls skip re-encoding.
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
name_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
public static final int REF_ATTR_NAME_FIELD_NUMBER = 21;
// Holds either a java.lang.String or a ByteString (same lazy scheme as name_).
// NOTE(review): the String accessor getRefAttrName() is not visible in this
// chunk; it presumably exists elsewhere in the generated class — confirm.
@SuppressWarnings("serial")
private volatile java.lang.Object refAttrName_ = "";
/**
 * if ref_attr_name is not empty, ref_attr_name is the attribute name in parent function.
 * In this case, this AttributeProto does not contain data, and it's a reference of attribute
 * in parent scope.
 * NOTE: This should ONLY be used in function (sub-graph). It's invalid to be used in main graph.
 *
 * optional string ref_attr_name = 21;
 * @return Whether the refAttrName field is set.
 */
@java.lang.Override
public boolean hasRefAttrName() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
 * if ref_attr_name is not empty, ref_attr_name is the attribute name in parent function.
 * In this case, this AttributeProto does not contain data, and it's a reference of attribute
 * in parent scope.
 * NOTE: This should ONLY be used in function (sub-graph). It's invalid to be used in main graph.
 *
 * optional string ref_attr_name = 21;
 * @return The bytes for refAttrName.
 */
@java.lang.Override
public com.google.protobuf.ByteString
getRefAttrNameBytes() {
java.lang.Object ref = refAttrName_;
if (ref instanceof java.lang.String) {
// Encode and cache the ByteString form so future calls skip re-encoding.
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
refAttrName_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
public static final int DOC_STRING_FIELD_NUMBER = 13;
// Holds either a java.lang.String or a ByteString (same lazy scheme as name_).
@SuppressWarnings("serial")
private volatile java.lang.Object docString_ = "";
/**
 * A human-readable documentation for this attribute. Markdown is allowed.
 *
 * optional string doc_string = 13;
 * @return Whether the docString field is set.
 */
@java.lang.Override
public boolean hasDocString() {
return ((bitField0_ & 0x00000004) != 0);
}
/**
 * A human-readable documentation for this attribute. Markdown is allowed.
 *
 * optional string doc_string = 13;
 * @return The bytes for docString.
 */
@java.lang.Override
public com.google.protobuf.ByteString
getDocStringBytes() {
java.lang.Object ref = docString_;
if (ref instanceof java.lang.String) {
// Encode and cache the ByteString form so future calls skip re-encoding.
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
docString_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
public static final int TYPE_FIELD_NUMBER = 20;
// Stored as the raw wire number; translated to the enum by getType().
private int type_ = 0;
/**
 * The type field MUST be present for this version of the IR.
 * For 0.0.1 versions of the IR, this field was not defined, and
 * implementations needed to use has_field heuristics to determine
 * which value field was in use. For IR_VERSION 0.0.2 or later, this
 * field MUST be set and match the f|i|s|t|... field in use. This
 * change was made to accommodate proto3 implementations.
 *
 * optional .onnx.AttributeProto.AttributeType type = 20;
 * @return Whether the type field is set.
 */
@java.lang.Override public boolean hasType() {
return ((bitField0_ & 0x00000008) != 0);
}
/**
 * The type field MUST be present for this version of the IR.
 * For 0.0.1 versions of the IR, this field was not defined, and
 * implementations needed to use has_field heuristics to determine
 * which value field was in use. For IR_VERSION 0.0.2 or later, this
 * field MUST be set and match the f|i|s|t|... field in use. This
 * change was made to accommodate proto3 implementations.
 *
 * optional .onnx.AttributeProto.AttributeType type = 20;
 * @return The type, or UNDEFINED when the stored number matches no enum value.
 */
@java.lang.Override public onnx.Onnx.AttributeProto.AttributeType getType() {
onnx.Onnx.AttributeProto.AttributeType result = onnx.Onnx.AttributeProto.AttributeType.forNumber(type_);
return result == null ? onnx.Onnx.AttributeProto.AttributeType.UNDEFINED : result;
}
public static final int F_FIELD_NUMBER = 2;
private float f_ = 0F;
/**
 * Exactly ONE of the following fields must be present for this version of the IR
 *
 * optional float f = 2;
 * @return Whether the f field is set.
 */
@java.lang.Override
public boolean hasF() {
return ((bitField0_ & 0x00000010) != 0);
}
/**
 * Exactly ONE of the following fields must be present for this version of the IR
 *
 * optional float f = 2;
 * @return The f.
 */
@java.lang.Override
public float getF() {
return f_;
}
public static final int I_FIELD_NUMBER = 3;
private long i_ = 0L;
/**
 * int
 *
 * optional int64 i = 3;
 * @return Whether the i field is set.
 */
@java.lang.Override
public boolean hasI() {
return ((bitField0_ & 0x00000020) != 0);
}
/**
 * int
 *
 * optional int64 i = 3;
 * @return The i.
 */
@java.lang.Override
public long getI() {
return i_;
}
public static final int S_FIELD_NUMBER = 4;
private com.google.protobuf.ByteString s_ = com.google.protobuf.ByteString.EMPTY;
/**
 * UTF-8 string
 *
 * optional bytes s = 4;
 * @return Whether the s field is set.
 */
@java.lang.Override
public boolean hasS() {
return ((bitField0_ & 0x00000040) != 0);
}
/**
 * UTF-8 string
 *
 * optional bytes s = 4;
 * @return The s.
 */
@java.lang.Override
public com.google.protobuf.ByteString getS() {
return s_;
}
public static final int T_FIELD_NUMBER = 5;
// Nullable; getT() substitutes the default instance when unset.
private onnx.Onnx.TensorProto t_;
/**
 * tensor value
 *
 * optional .onnx.TensorProto t = 5;
 * @return Whether the t field is set.
 */
@java.lang.Override
public boolean hasT() {
return ((bitField0_ & 0x00000080) != 0);
}
/**
 * tensor value
 *
 * optional .onnx.TensorProto t = 5;
 * @return The t, or the default instance if unset.
 */
@java.lang.Override
public onnx.Onnx.TensorProto getT() {
return t_ == null ? onnx.Onnx.TensorProto.getDefaultInstance() : t_;
}
/**
 * tensor value
 *
 * optional .onnx.TensorProto t = 5;
 */
@java.lang.Override
public onnx.Onnx.TensorProtoOrBuilder getTOrBuilder() {
return t_ == null ? onnx.Onnx.TensorProto.getDefaultInstance() : t_;
}
public static final int G_FIELD_NUMBER = 6;
// Nullable; getG() substitutes the default instance when unset.
private onnx.Onnx.GraphProto g_;
/**
 * graph
 *
 * optional .onnx.GraphProto g = 6;
 * @return Whether the g field is set.
 */
@java.lang.Override
public boolean hasG() {
return ((bitField0_ & 0x00000100) != 0);
}
/**
 * graph
 *
 * optional .onnx.GraphProto g = 6;
 * @return The g, or the default instance if unset.
 */
@java.lang.Override
public onnx.Onnx.GraphProto getG() {
return g_ == null ? onnx.Onnx.GraphProto.getDefaultInstance() : g_;
}
/**
 * graph
 *
 * optional .onnx.GraphProto g = 6;
 */
@java.lang.Override
public onnx.Onnx.GraphProtoOrBuilder getGOrBuilder() {
return g_ == null ? onnx.Onnx.GraphProto.getDefaultInstance() : g_;
}
public static final int SPARSE_TENSOR_FIELD_NUMBER = 22;
// Nullable; the getSparseTensor() accessor is not visible in this chunk.
private onnx.Onnx.SparseTensorProto sparseTensor_;
/**
 * sparse tensor value
 *
 * optional .onnx.SparseTensorProto sparse_tensor = 22;
 * @return Whether the sparseTensor field is set.
 */
@java.lang.Override
public boolean hasSparseTensor() {
return ((bitField0_ & 0x00000200) != 0);
}
/**
 * optional .onnx.TypeProto tp = 14;
 *
 * NOTE(review): the tp_ field declaration and its other accessors are not
 * visible in this chunk; tp_ is presumably declared alongside the other
 * singular message fields — confirm against the full generated file.
 */
@java.lang.Override
public onnx.Onnx.TypeProtoOrBuilder getTpOrBuilder() {
return tp_ == null ? onnx.Onnx.TypeProto.getDefaultInstance() : tp_;
}
public static final int FLOATS_FIELD_NUMBER = 7;
@SuppressWarnings("serial")
private com.google.protobuf.Internal.FloatList floats_ =
    emptyFloatList();
/**
 * list of floats
 *
 * repeated float floats = 7;
 * @return A list containing the floats.
 */
@java.lang.Override
public java.util.List<java.lang.Float>
    getFloatsList() {
  return floats_;
}
/**
 * list of floats
 *
 * repeated float floats = 7;
 * @return The count of floats.
 */
public int getFloatsCount() {
  return floats_.size();
}
/**
 * list of floats
 *
 * repeated float floats = 7;
 * @param index The index of the element to return.
 * @return The floats at the given index.
 */
public float getFloats(int index) {
  return floats_.getFloat(index);
}
public static final int INTS_FIELD_NUMBER = 8;
@SuppressWarnings("serial")
private com.google.protobuf.Internal.LongList ints_ =
    emptyLongList();
/**
 * list of ints
 *
 * repeated int64 ints = 8;
 * @return A list containing the ints.
 */
@java.lang.Override
public java.util.List<java.lang.Long>
    getIntsList() {
  return ints_;
}
/**
 * list of ints
 *
 * repeated int64 ints = 8;
 * @return The count of ints.
 */
public int getIntsCount() {
  return ints_.size();
}
/**
 * list of ints
 *
 * repeated int64 ints = 8;
 * @param index The index of the element to return.
 * @return The ints at the given index.
 */
public long getInts(int index) {
  return ints_.getLong(index);
}
public static final int STRINGS_FIELD_NUMBER = 9;
@SuppressWarnings("serial")
private com.google.protobuf.Internal.ProtobufList<com.google.protobuf.ByteString> strings_ =
    emptyList(com.google.protobuf.ByteString.class);
/**
 * list of UTF-8 strings
 *
 * repeated bytes strings = 9;
 * @return A list containing the strings.
 */
@java.lang.Override
public java.util.List<com.google.protobuf.ByteString>
    getStringsList() {
  return strings_;
}
/**
 * list of UTF-8 strings
 *
 * repeated bytes strings = 9;
 * @return The count of strings.
 */
public int getStringsCount() {
  return strings_.size();
}
/**
 * list of UTF-8 strings
 *
 * repeated bytes strings = 9;
 * @param index The index of the element to return.
 * @return The strings at the given index.
 */
public com.google.protobuf.ByteString getStrings(int index) {
  return strings_.get(index);
}
public static final int TENSORS_FIELD_NUMBER = 10;
@SuppressWarnings("serial")
private java.util.List<onnx.Onnx.TensorProto> tensors_;
/**
 * repeated .onnx.TypeProto type_protos = 15;
 *
 * NOTE(review): the accessors for tensors/graphs/sparse_tensors and the
 * typeProtos_ declaration are not visible in this chunk; typeProtos_ is
 * presumably declared alongside the other repeated fields — confirm
 * against the full generated file.
 */
@java.lang.Override
public onnx.Onnx.TypeProtoOrBuilder getTypeProtosOrBuilder(
    int index) {
  return typeProtos_.get(index);
}
// Memoized isInitialized() result: -1 = not yet computed, 0 = false, 1 = true.
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
// No required fields are checked here, so the message is always initialized.
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
// Serializes set singular fields (guarded by their bitField0_ presence bits)
// and all repeated fields. Fields are written in ascending tag-number order:
// 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 20, 21, 22, 23.
if (((bitField0_ & 0x00000001) != 0)) {
com.google.protobuf.GeneratedMessageV3.writeString(output, 1, name_);
}
if (((bitField0_ & 0x00000010) != 0)) {
output.writeFloat(2, f_);
}
if (((bitField0_ & 0x00000020) != 0)) {
output.writeInt64(3, i_);
}
if (((bitField0_ & 0x00000040) != 0)) {
output.writeBytes(4, s_);
}
if (((bitField0_ & 0x00000080) != 0)) {
output.writeMessage(5, getT());
}
if (((bitField0_ & 0x00000100) != 0)) {
output.writeMessage(6, getG());
}
for (int i = 0; i < floats_.size(); i++) {
output.writeFloat(7, floats_.getFloat(i));
}
for (int i = 0; i < ints_.size(); i++) {
output.writeInt64(8, ints_.getLong(i));
}
for (int i = 0; i < strings_.size(); i++) {
output.writeBytes(9, strings_.get(i));
}
for (int i = 0; i < tensors_.size(); i++) {
output.writeMessage(10, tensors_.get(i));
}
for (int i = 0; i < graphs_.size(); i++) {
output.writeMessage(11, graphs_.get(i));
}
if (((bitField0_ & 0x00000004) != 0)) {
com.google.protobuf.GeneratedMessageV3.writeString(output, 13, docString_);
}
if (((bitField0_ & 0x00000400) != 0)) {
output.writeMessage(14, getTp());
}
for (int i = 0; i < typeProtos_.size(); i++) {
output.writeMessage(15, typeProtos_.get(i));
}
if (((bitField0_ & 0x00000008) != 0)) {
output.writeEnum(20, type_);
}
if (((bitField0_ & 0x00000002) != 0)) {
com.google.protobuf.GeneratedMessageV3.writeString(output, 21, refAttrName_);
}
if (((bitField0_ & 0x00000200) != 0)) {
output.writeMessage(22, getSparseTensor());
}
for (int i = 0; i < sparseTensors_.size(); i++) {
output.writeMessage(23, sparseTensors_.get(i));
}
// Preserve round-tripping of fields unknown to this generated version.
getUnknownFields().writeTo(output);
}
// Computes (and memoizes in memoizedSize) the exact number of bytes writeTo
// will emit. Must mirror writeTo field-for-field.
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) != 0)) {
size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, name_);
}
if (((bitField0_ & 0x00000010) != 0)) {
size += com.google.protobuf.CodedOutputStream
.computeFloatSize(2, f_);
}
if (((bitField0_ & 0x00000020) != 0)) {
size += com.google.protobuf.CodedOutputStream
.computeInt64Size(3, i_);
}
if (((bitField0_ & 0x00000040) != 0)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(4, s_);
}
if (((bitField0_ & 0x00000080) != 0)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(5, getT());
}
if (((bitField0_ & 0x00000100) != 0)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(6, getG());
}
{
// floats are fixed32 on the wire: 4 data bytes plus a 1-byte tag each.
int dataSize = 0;
dataSize = 4 * getFloatsList().size();
size += dataSize;
size += 1 * getFloatsList().size();
}
{
// int64s are varint-encoded, so each element's size must be computed.
int dataSize = 0;
for (int i = 0; i < ints_.size(); i++) {
dataSize += com.google.protobuf.CodedOutputStream
.computeInt64SizeNoTag(ints_.getLong(i));
}
size += dataSize;
size += 1 * getIntsList().size();
}
{
int dataSize = 0;
for (int i = 0; i < strings_.size(); i++) {
dataSize += com.google.protobuf.CodedOutputStream
.computeBytesSizeNoTag(strings_.get(i));
}
size += dataSize;
size += 1 * getStringsList().size();
}
for (int i = 0; i < tensors_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(10, tensors_.get(i));
}
for (int i = 0; i < graphs_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(11, graphs_.get(i));
}
if (((bitField0_ & 0x00000004) != 0)) {
size += com.google.protobuf.GeneratedMessageV3.computeStringSize(13, docString_);
}
if (((bitField0_ & 0x00000400) != 0)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(14, getTp());
}
for (int i = 0; i < typeProtos_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(15, typeProtos_.get(i));
}
if (((bitField0_ & 0x00000008) != 0)) {
size += com.google.protobuf.CodedOutputStream
.computeEnumSize(20, type_);
}
if (((bitField0_ & 0x00000002) != 0)) {
size += com.google.protobuf.GeneratedMessageV3.computeStringSize(21, refAttrName_);
}
if (((bitField0_ & 0x00000200) != 0)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(22, getSparseTensor());
}
for (int i = 0; i < sparseTensors_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(23, sparseTensors_.get(i));
}
size += getUnknownFields().getSerializedSize();
memoizedSize = size;
return size;
}
// Structural equality: two AttributeProtos are equal iff they have the same
// set of present optional fields with equal values, equal repeated-field
// contents, and identical unknown fields.
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof onnx.Onnx.AttributeProto)) {
return super.equals(obj);
}
onnx.Onnx.AttributeProto other = (onnx.Onnx.AttributeProto) obj;
if (hasName() != other.hasName()) return false;
if (hasName()) {
if (!getName()
.equals(other.getName())) return false;
}
if (hasRefAttrName() != other.hasRefAttrName()) return false;
if (hasRefAttrName()) {
if (!getRefAttrName()
.equals(other.getRefAttrName())) return false;
}
if (hasDocString() != other.hasDocString()) return false;
if (hasDocString()) {
if (!getDocString()
.equals(other.getDocString())) return false;
}
if (hasType() != other.hasType()) return false;
if (hasType()) {
if (type_ != other.type_) return false;
}
if (hasF() != other.hasF()) return false;
if (hasF()) {
// Compare float bits so NaN == NaN and +0.0 != -0.0, matching hashCode.
if (java.lang.Float.floatToIntBits(getF())
!= java.lang.Float.floatToIntBits(
other.getF())) return false;
}
if (hasI() != other.hasI()) return false;
if (hasI()) {
if (getI()
!= other.getI()) return false;
}
if (hasS() != other.hasS()) return false;
if (hasS()) {
if (!getS()
.equals(other.getS())) return false;
}
if (hasT() != other.hasT()) return false;
if (hasT()) {
if (!getT()
.equals(other.getT())) return false;
}
if (hasG() != other.hasG()) return false;
if (hasG()) {
if (!getG()
.equals(other.getG())) return false;
}
if (hasSparseTensor() != other.hasSparseTensor()) return false;
if (hasSparseTensor()) {
if (!getSparseTensor()
.equals(other.getSparseTensor())) return false;
}
if (hasTp() != other.hasTp()) return false;
if (hasTp()) {
if (!getTp()
.equals(other.getTp())) return false;
}
if (!getFloatsList()
.equals(other.getFloatsList())) return false;
if (!getIntsList()
.equals(other.getIntsList())) return false;
if (!getStringsList()
.equals(other.getStringsList())) return false;
if (!getTensorsList()
.equals(other.getTensorsList())) return false;
if (!getGraphsList()
.equals(other.getGraphsList())) return false;
if (!getSparseTensorsList()
.equals(other.getSparseTensorsList())) return false;
if (!getTypeProtosList()
.equals(other.getTypeProtosList())) return false;
if (!getUnknownFields().equals(other.getUnknownFields())) return false;
return true;
}
// Hash consistent with equals(): folds in each present optional field and
// each non-empty repeated field, keyed by its field number. Memoized.
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (hasName()) {
hash = (37 * hash) + NAME_FIELD_NUMBER;
hash = (53 * hash) + getName().hashCode();
}
if (hasRefAttrName()) {
hash = (37 * hash) + REF_ATTR_NAME_FIELD_NUMBER;
hash = (53 * hash) + getRefAttrName().hashCode();
}
if (hasDocString()) {
hash = (37 * hash) + DOC_STRING_FIELD_NUMBER;
hash = (53 * hash) + getDocString().hashCode();
}
if (hasType()) {
hash = (37 * hash) + TYPE_FIELD_NUMBER;
hash = (53 * hash) + type_;
}
if (hasF()) {
hash = (37 * hash) + F_FIELD_NUMBER;
// Bit-level float hash, matching the bit-level comparison in equals().
hash = (53 * hash) + java.lang.Float.floatToIntBits(
getF());
}
if (hasI()) {
hash = (37 * hash) + I_FIELD_NUMBER;
hash = (53 * hash) + com.google.protobuf.Internal.hashLong(
getI());
}
if (hasS()) {
hash = (37 * hash) + S_FIELD_NUMBER;
hash = (53 * hash) + getS().hashCode();
}
if (hasT()) {
hash = (37 * hash) + T_FIELD_NUMBER;
hash = (53 * hash) + getT().hashCode();
}
if (hasG()) {
hash = (37 * hash) + G_FIELD_NUMBER;
hash = (53 * hash) + getG().hashCode();
}
if (hasSparseTensor()) {
hash = (37 * hash) + SPARSE_TENSOR_FIELD_NUMBER;
hash = (53 * hash) + getSparseTensor().hashCode();
}
if (hasTp()) {
hash = (37 * hash) + TP_FIELD_NUMBER;
hash = (53 * hash) + getTp().hashCode();
}
if (getFloatsCount() > 0) {
hash = (37 * hash) + FLOATS_FIELD_NUMBER;
hash = (53 * hash) + getFloatsList().hashCode();
}
if (getIntsCount() > 0) {
hash = (37 * hash) + INTS_FIELD_NUMBER;
hash = (53 * hash) + getIntsList().hashCode();
}
if (getStringsCount() > 0) {
hash = (37 * hash) + STRINGS_FIELD_NUMBER;
hash = (53 * hash) + getStringsList().hashCode();
}
if (getTensorsCount() > 0) {
hash = (37 * hash) + TENSORS_FIELD_NUMBER;
hash = (53 * hash) + getTensorsList().hashCode();
}
if (getGraphsCount() > 0) {
hash = (37 * hash) + GRAPHS_FIELD_NUMBER;
hash = (53 * hash) + getGraphsList().hashCode();
}
if (getSparseTensorsCount() > 0) {
hash = (37 * hash) + SPARSE_TENSORS_FIELD_NUMBER;
hash = (53 * hash) + getSparseTensorsList().hashCode();
}
if (getTypeProtosCount() > 0) {
hash = (37 * hash) + TYPE_PROTOS_FIELD_NUMBER;
hash = (53 * hash) + getTypeProtosList().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
// Standard generated parse entry points. All overloads delegate to the
// shared PARSER; the ExtensionRegistry variants resolve extensions during
// parsing, and the *Delimited* variants expect a leading varint length.
public static onnx.Onnx.AttributeProto parseFrom(
java.nio.ByteBuffer data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static onnx.Onnx.AttributeProto parseFrom(
java.nio.ByteBuffer data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static onnx.Onnx.AttributeProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static onnx.Onnx.AttributeProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static onnx.Onnx.AttributeProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static onnx.Onnx.AttributeProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static onnx.Onnx.AttributeProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static onnx.Onnx.AttributeProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static onnx.Onnx.AttributeProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static onnx.Onnx.AttributeProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static onnx.Onnx.AttributeProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static onnx.Onnx.AttributeProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
// Builder factory methods: newBuilder() starts from the default instance,
// newBuilder(prototype) pre-populates from an existing message, and
// toBuilder() round-trips this message into a mutable Builder.
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(onnx.Onnx.AttributeProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
// The default instance yields a fresh empty Builder; anything else is copied.
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
*
* Attributes
*
* A named attribute containing either singular float, integer, string, graph,
* and tensor values, or repeated float, integer, string, graph, and tensor values.
* An AttributeProto MUST contain the name field, and *only one* of the
* following content fields, effectively enforcing a C/C++ union equivalent.
*
*
* Protobuf type {@code onnx.AttributeProto}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessageV3.Builder implements
// @@protoc_insertion_point(builder_implements:onnx.AttributeProto)
onnx.Onnx.AttributeProtoOrBuilder {
// Descriptor plumbing and construction for the AttributeProto Builder.
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return onnx.Onnx.internal_static_onnx_AttributeProto_descriptor;
}
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return onnx.Onnx.internal_static_onnx_AttributeProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
onnx.Onnx.AttributeProto.class, onnx.Onnx.AttributeProto.Builder.class);
}
// Construct using onnx.Onnx.AttributeProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
// Eagerly creates nested-field builders when the runtime is configured to
// always use field builders (needed for parent/child change notification).
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
getTFieldBuilder();
getGFieldBuilder();
getSparseTensorFieldBuilder();
getTpFieldBuilder();
getTensorsFieldBuilder();
getGraphsFieldBuilder();
getSparseTensorsFieldBuilder();
getTypeProtosFieldBuilder();
}
}
// Resets every field to its default, disposes any nested-message builders,
// and clears all presence / mutability bits in bitField0_.
@java.lang.Override
public Builder clear() {
super.clear();
bitField0_ = 0;
name_ = "";
refAttrName_ = "";
docString_ = "";
type_ = 0;
f_ = 0F;
i_ = 0L;
s_ = com.google.protobuf.ByteString.EMPTY;
t_ = null;
if (tBuilder_ != null) {
tBuilder_.dispose();
tBuilder_ = null;
}
g_ = null;
if (gBuilder_ != null) {
gBuilder_.dispose();
gBuilder_ = null;
}
sparseTensor_ = null;
if (sparseTensorBuilder_ != null) {
sparseTensorBuilder_.dispose();
sparseTensorBuilder_ = null;
}
tp_ = null;
if (tpBuilder_ != null) {
tpBuilder_.dispose();
tpBuilder_ = null;
}
floats_ = emptyFloatList();
ints_ = emptyLongList();
strings_ = emptyList(com.google.protobuf.ByteString.class);
// Repeated message fields may be held either directly or in a
// RepeatedFieldBuilder; clear whichever representation is active.
if (tensorsBuilder_ == null) {
tensors_ = java.util.Collections.emptyList();
} else {
tensors_ = null;
tensorsBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00004000);
if (graphsBuilder_ == null) {
graphs_ = java.util.Collections.emptyList();
} else {
graphs_ = null;
graphsBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00008000);
if (sparseTensorsBuilder_ == null) {
sparseTensors_ = java.util.Collections.emptyList();
} else {
sparseTensors_ = null;
sparseTensorsBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00010000);
if (typeProtosBuilder_ == null) {
typeProtos_ = java.util.Collections.emptyList();
} else {
typeProtos_ = null;
typeProtosBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00020000);
return this;
}
@java.lang.Override
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return onnx.Onnx.internal_static_onnx_AttributeProto_descriptor;
}
@java.lang.Override
public onnx.Onnx.AttributeProto getDefaultInstanceForType() {
return onnx.Onnx.AttributeProto.getDefaultInstance();
}
// build() enforces required-field initialization; buildPartial() does not.
@java.lang.Override
public onnx.Onnx.AttributeProto build() {
onnx.Onnx.AttributeProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public onnx.Onnx.AttributeProto buildPartial() {
onnx.Onnx.AttributeProto result = new onnx.Onnx.AttributeProto(this);
buildPartialRepeatedFields(result);
if (bitField0_ != 0) { buildPartial0(result); }
onBuilt();
return result;
}
// Transfers repeated message fields into the result. Lists held directly are
// frozen with unmodifiableList (and their mutability bit cleared, so the
// builder will copy-on-write if mutated again); lists held in a field
// builder are materialized via build().
private void buildPartialRepeatedFields(onnx.Onnx.AttributeProto result) {
if (tensorsBuilder_ == null) {
if (((bitField0_ & 0x00004000) != 0)) {
tensors_ = java.util.Collections.unmodifiableList(tensors_);
bitField0_ = (bitField0_ & ~0x00004000);
}
result.tensors_ = tensors_;
} else {
result.tensors_ = tensorsBuilder_.build();
}
if (graphsBuilder_ == null) {
if (((bitField0_ & 0x00008000) != 0)) {
graphs_ = java.util.Collections.unmodifiableList(graphs_);
bitField0_ = (bitField0_ & ~0x00008000);
}
result.graphs_ = graphs_;
} else {
result.graphs_ = graphsBuilder_.build();
}
if (sparseTensorsBuilder_ == null) {
if (((bitField0_ & 0x00010000) != 0)) {
sparseTensors_ = java.util.Collections.unmodifiableList(sparseTensors_);
bitField0_ = (bitField0_ & ~0x00010000);
}
result.sparseTensors_ = sparseTensors_;
} else {
result.sparseTensors_ = sparseTensorsBuilder_.build();
}
if (typeProtosBuilder_ == null) {
if (((bitField0_ & 0x00020000) != 0)) {
typeProtos_ = java.util.Collections.unmodifiableList(typeProtos_);
bitField0_ = (bitField0_ & ~0x00020000);
}
result.typeProtos_ = typeProtos_;
} else {
result.typeProtos_ = typeProtosBuilder_.build();
}
}
// Copies scalar/singular fields whose presence bit is set into the result,
// translating builder presence bits into the message's bitField0_ layout.
private void buildPartial0(onnx.Onnx.AttributeProto result) {
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) != 0)) {
result.name_ = name_;
to_bitField0_ |= 0x00000001;
}
if (((from_bitField0_ & 0x00000002) != 0)) {
result.refAttrName_ = refAttrName_;
to_bitField0_ |= 0x00000002;
}
if (((from_bitField0_ & 0x00000004) != 0)) {
result.docString_ = docString_;
to_bitField0_ |= 0x00000004;
}
if (((from_bitField0_ & 0x00000008) != 0)) {
result.type_ = type_;
to_bitField0_ |= 0x00000008;
}
if (((from_bitField0_ & 0x00000010) != 0)) {
result.f_ = f_;
to_bitField0_ |= 0x00000010;
}
if (((from_bitField0_ & 0x00000020) != 0)) {
result.i_ = i_;
to_bitField0_ |= 0x00000020;
}
if (((from_bitField0_ & 0x00000040) != 0)) {
result.s_ = s_;
to_bitField0_ |= 0x00000040;
}
// Singular messages: prefer the nested builder's built value if one exists.
if (((from_bitField0_ & 0x00000080) != 0)) {
result.t_ = tBuilder_ == null
? t_
: tBuilder_.build();
to_bitField0_ |= 0x00000080;
}
if (((from_bitField0_ & 0x00000100) != 0)) {
result.g_ = gBuilder_ == null
? g_
: gBuilder_.build();
to_bitField0_ |= 0x00000100;
}
if (((from_bitField0_ & 0x00000200) != 0)) {
result.sparseTensor_ = sparseTensorBuilder_ == null
? sparseTensor_
: sparseTensorBuilder_.build();
to_bitField0_ |= 0x00000200;
}
if (((from_bitField0_ & 0x00000400) != 0)) {
result.tp_ = tpBuilder_ == null
? tp_
: tpBuilder_.build();
to_bitField0_ |= 0x00000400;
}
// Repeated primitive lists are frozen in place and shared with the result.
if (((from_bitField0_ & 0x00000800) != 0)) {
floats_.makeImmutable();
result.floats_ = floats_;
}
if (((from_bitField0_ & 0x00001000) != 0)) {
ints_.makeImmutable();
result.ints_ = ints_;
}
if (((from_bitField0_ & 0x00002000) != 0)) {
strings_.makeImmutable();
result.strings_ = strings_;
}
result.bitField0_ |= to_bitField0_;
}
// Boilerplate overrides that narrow the return type to this Builder while
// delegating entirely to GeneratedMessageV3.Builder.
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
com.google.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
com.google.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
com.google.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
com.google.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
com.google.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
// Dispatches to the typed merge when possible; otherwise falls back to the
// reflective, descriptor-based merge in the superclass.
@java.lang.Override
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof onnx.Onnx.AttributeProto) {
return mergeFrom((onnx.Onnx.AttributeProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
// Merges another AttributeProto into this builder using protobuf merge
// semantics: set scalars in `other` overwrite ours, singular messages are
// recursively merged, and repeated fields are concatenated. When our
// repeated list is empty the other's (immutable) list is adopted by
// reference instead of copied.
public Builder mergeFrom(onnx.Onnx.AttributeProto other) {
if (other == onnx.Onnx.AttributeProto.getDefaultInstance()) return this;
if (other.hasName()) {
name_ = other.name_;
bitField0_ |= 0x00000001;
onChanged();
}
if (other.hasRefAttrName()) {
refAttrName_ = other.refAttrName_;
bitField0_ |= 0x00000002;
onChanged();
}
if (other.hasDocString()) {
docString_ = other.docString_;
bitField0_ |= 0x00000004;
onChanged();
}
if (other.hasType()) {
setType(other.getType());
}
if (other.hasF()) {
setF(other.getF());
}
if (other.hasI()) {
setI(other.getI());
}
if (other.hasS()) {
setS(other.getS());
}
if (other.hasT()) {
mergeT(other.getT());
}
if (other.hasG()) {
mergeG(other.getG());
}
if (other.hasSparseTensor()) {
mergeSparseTensor(other.getSparseTensor());
}
if (other.hasTp()) {
mergeTp(other.getTp());
}
if (!other.floats_.isEmpty()) {
if (floats_.isEmpty()) {
floats_ = other.floats_;
floats_.makeImmutable();
bitField0_ |= 0x00000800;
} else {
ensureFloatsIsMutable();
floats_.addAll(other.floats_);
}
onChanged();
}
if (!other.ints_.isEmpty()) {
if (ints_.isEmpty()) {
ints_ = other.ints_;
ints_.makeImmutable();
bitField0_ |= 0x00001000;
} else {
ensureIntsIsMutable();
ints_.addAll(other.ints_);
}
onChanged();
}
if (!other.strings_.isEmpty()) {
if (strings_.isEmpty()) {
strings_ = other.strings_;
strings_.makeImmutable();
bitField0_ |= 0x00002000;
} else {
ensureStringsIsMutable();
strings_.addAll(other.strings_);
}
onChanged();
}
// Repeated message fields: behavior depends on whether this builder holds
// the list directly or through a RepeatedFieldBuilder. In the builder case,
// adopting the other's list requires discarding and (lazily) recreating the
// field builder.
if (tensorsBuilder_ == null) {
if (!other.tensors_.isEmpty()) {
if (tensors_.isEmpty()) {
tensors_ = other.tensors_;
bitField0_ = (bitField0_ & ~0x00004000);
} else {
ensureTensorsIsMutable();
tensors_.addAll(other.tensors_);
}
onChanged();
}
} else {
if (!other.tensors_.isEmpty()) {
if (tensorsBuilder_.isEmpty()) {
tensorsBuilder_.dispose();
tensorsBuilder_ = null;
tensors_ = other.tensors_;
bitField0_ = (bitField0_ & ~0x00004000);
tensorsBuilder_ =
com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
getTensorsFieldBuilder() : null;
} else {
tensorsBuilder_.addAllMessages(other.tensors_);
}
}
}
if (graphsBuilder_ == null) {
if (!other.graphs_.isEmpty()) {
if (graphs_.isEmpty()) {
graphs_ = other.graphs_;
bitField0_ = (bitField0_ & ~0x00008000);
} else {
ensureGraphsIsMutable();
graphs_.addAll(other.graphs_);
}
onChanged();
}
} else {
if (!other.graphs_.isEmpty()) {
if (graphsBuilder_.isEmpty()) {
graphsBuilder_.dispose();
graphsBuilder_ = null;
graphs_ = other.graphs_;
bitField0_ = (bitField0_ & ~0x00008000);
graphsBuilder_ =
com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
getGraphsFieldBuilder() : null;
} else {
graphsBuilder_.addAllMessages(other.graphs_);
}
}
}
if (sparseTensorsBuilder_ == null) {
if (!other.sparseTensors_.isEmpty()) {
if (sparseTensors_.isEmpty()) {
sparseTensors_ = other.sparseTensors_;
bitField0_ = (bitField0_ & ~0x00010000);
} else {
ensureSparseTensorsIsMutable();
sparseTensors_.addAll(other.sparseTensors_);
}
onChanged();
}
} else {
if (!other.sparseTensors_.isEmpty()) {
if (sparseTensorsBuilder_.isEmpty()) {
sparseTensorsBuilder_.dispose();
sparseTensorsBuilder_ = null;
sparseTensors_ = other.sparseTensors_;
bitField0_ = (bitField0_ & ~0x00010000);
sparseTensorsBuilder_ =
com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
getSparseTensorsFieldBuilder() : null;
} else {
sparseTensorsBuilder_.addAllMessages(other.sparseTensors_);
}
}
}
if (typeProtosBuilder_ == null) {
if (!other.typeProtos_.isEmpty()) {
if (typeProtos_.isEmpty()) {
typeProtos_ = other.typeProtos_;
bitField0_ = (bitField0_ & ~0x00020000);
} else {
ensureTypeProtosIsMutable();
typeProtos_.addAll(other.typeProtos_);
}
onChanged();
}
} else {
if (!other.typeProtos_.isEmpty()) {
if (typeProtosBuilder_.isEmpty()) {
typeProtosBuilder_.dispose();
typeProtosBuilder_ = null;
typeProtos_ = other.typeProtos_;
bitField0_ = (bitField0_ & ~0x00020000);
typeProtosBuilder_ =
com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
getTypeProtosFieldBuilder() : null;
} else {
typeProtosBuilder_.addAllMessages(other.typeProtos_);
}
}
}
this.mergeUnknownFields(other.getUnknownFields());
onChanged();
return this;
}
// No required fields in AttributeProto, so a builder is always initialized.
@java.lang.Override
public final boolean isInitialized() {
return true;
}
// Streaming parser: reads tag/value pairs until EOF (tag 0) or an end-group
// tag, dispatching on the full tag (field number << 3 | wire type). Repeated
// int64/float fields accept both unpacked (one element per tag) and packed
// (length-delimited) encodings. Unknown fields are preserved.
@java.lang.Override
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 10: {
name_ = input.readBytes();
bitField0_ |= 0x00000001;
break;
} // case 10
case 21: {
f_ = input.readFloat();
bitField0_ |= 0x00000010;
break;
} // case 21
case 24: {
i_ = input.readInt64();
bitField0_ |= 0x00000020;
break;
} // case 24
case 34: {
s_ = input.readBytes();
bitField0_ |= 0x00000040;
break;
} // case 34
case 42: {
input.readMessage(
getTFieldBuilder().getBuilder(),
extensionRegistry);
bitField0_ |= 0x00000080;
break;
} // case 42
case 50: {
input.readMessage(
getGFieldBuilder().getBuilder(),
extensionRegistry);
bitField0_ |= 0x00000100;
break;
} // case 50
case 61: {
float v = input.readFloat();
ensureFloatsIsMutable();
floats_.addFloat(v);
break;
} // case 61
case 58: {
// Packed floats: cap the pre-allocation at 4096 bytes so a hostile
// length prefix cannot force a huge up-front allocation.
int length = input.readRawVarint32();
int limit = input.pushLimit(length);
int alloc = length > 4096 ? 4096 : length;
ensureFloatsIsMutable(alloc / 4);
while (input.getBytesUntilLimit() > 0) {
floats_.addFloat(input.readFloat());
}
input.popLimit(limit);
break;
} // case 58
case 64: {
long v = input.readInt64();
ensureIntsIsMutable();
ints_.addLong(v);
break;
} // case 64
case 66: {
int length = input.readRawVarint32();
int limit = input.pushLimit(length);
ensureIntsIsMutable();
while (input.getBytesUntilLimit() > 0) {
ints_.addLong(input.readInt64());
}
input.popLimit(limit);
break;
} // case 66
case 74: {
com.google.protobuf.ByteString v = input.readBytes();
ensureStringsIsMutable();
strings_.add(v);
break;
} // case 74
case 82: {
onnx.Onnx.TensorProto m =
input.readMessage(
onnx.Onnx.TensorProto.PARSER,
extensionRegistry);
if (tensorsBuilder_ == null) {
ensureTensorsIsMutable();
tensors_.add(m);
} else {
tensorsBuilder_.addMessage(m);
}
break;
} // case 82
case 90: {
onnx.Onnx.GraphProto m =
input.readMessage(
onnx.Onnx.GraphProto.PARSER,
extensionRegistry);
if (graphsBuilder_ == null) {
ensureGraphsIsMutable();
graphs_.add(m);
} else {
graphsBuilder_.addMessage(m);
}
break;
} // case 90
case 106: {
docString_ = input.readBytes();
bitField0_ |= 0x00000004;
break;
} // case 106
case 114: {
input.readMessage(
getTpFieldBuilder().getBuilder(),
extensionRegistry);
bitField0_ |= 0x00000400;
break;
} // case 114
case 122: {
onnx.Onnx.TypeProto m =
input.readMessage(
onnx.Onnx.TypeProto.PARSER,
extensionRegistry);
if (typeProtosBuilder_ == null) {
ensureTypeProtosIsMutable();
typeProtos_.add(m);
} else {
typeProtosBuilder_.addMessage(m);
}
break;
} // case 122
case 160: {
// Unrecognized enum numbers are kept as unknown fields rather than
// dropped, per proto2 closed-enum semantics.
int tmpRaw = input.readEnum();
onnx.Onnx.AttributeProto.AttributeType tmpValue =
onnx.Onnx.AttributeProto.AttributeType.forNumber(tmpRaw);
if (tmpValue == null) {
mergeUnknownVarintField(20, tmpRaw);
} else {
type_ = tmpRaw;
bitField0_ |= 0x00000008;
}
break;
} // case 160
case 170: {
refAttrName_ = input.readBytes();
bitField0_ |= 0x00000002;
break;
} // case 170
case 178: {
input.readMessage(
getSparseTensorFieldBuilder().getBuilder(),
extensionRegistry);
bitField0_ |= 0x00000200;
break;
} // case 178
case 186: {
onnx.Onnx.SparseTensorProto m =
input.readMessage(
onnx.Onnx.SparseTensorProto.PARSER,
extensionRegistry);
if (sparseTensorsBuilder_ == null) {
ensureSparseTensorsIsMutable();
sparseTensors_.add(m);
} else {
sparseTensorsBuilder_.addMessage(m);
}
break;
} // case 186
default: {
if (!super.parseUnknownField(input, extensionRegistry, tag)) {
done = true; // was an endgroup tag
}
break;
} // default:
} // switch (tag)
} // while (!done)
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.unwrapIOException();
} finally {
onChanged();
} // finally
return this;
}
// Presence and list-mutability bits for all fields of this builder.
private int bitField0_;
// Holds either a String or a ByteString; lazily converted on access.
private java.lang.Object name_ = "";
/**
 * <pre>
 * The name field MUST be present for this version of the IR.
 * </pre>
 *
 * <code>optional string name = 1;</code>
 * @return Whether the name field is set.
 */
public boolean hasName() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
 * <pre>
 * The name field MUST be present for this version of the IR.
 * </pre>
 *
 * <code>optional string name = 1;</code>
 * @return The name.
 */
public java.lang.String getName() {
java.lang.Object ref = name_;
if (!(ref instanceof java.lang.String)) {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
// Cache the decoded String only if the bytes were valid UTF-8, so the
// original bytes are preserved for re-serialization otherwise.
if (bs.isValidUtf8()) {
name_ = s;
}
return s;
} else {
return (java.lang.String) ref;
}
}
/**
 * <pre>
 * The name field MUST be present for this version of the IR.
 * </pre>
 *
 * <code>optional string name = 1;</code>
 * @return The bytes for name.
 */
public com.google.protobuf.ByteString
getNameBytes() {
java.lang.Object ref = name_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
name_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
 * <pre>
 * The name field MUST be present for this version of the IR.
 * </pre>
 *
 * <code>optional string name = 1;</code>
 * @param value The name to set.
 * @return This builder for chaining.
 */
public Builder setName(
java.lang.String value) {
if (value == null) { throw new NullPointerException(); }
name_ = value;
bitField0_ |= 0x00000001;
onChanged();
return this;
}
/**
 * <pre>
 * The name field MUST be present for this version of the IR.
 * </pre>
 *
 * <code>optional string name = 1;</code>
 * @return This builder for chaining.
 */
public Builder clearName() {
name_ = getDefaultInstance().getName();
bitField0_ = (bitField0_ & ~0x00000001);
onChanged();
return this;
}
/**
 * <pre>
 * The name field MUST be present for this version of the IR.
 * </pre>
 *
 * <code>optional string name = 1;</code>
 * @param value The bytes for name to set.
 * @return This builder for chaining.
 */
public Builder setNameBytes(
com.google.protobuf.ByteString value) {
if (value == null) { throw new NullPointerException(); }
name_ = value;
bitField0_ |= 0x00000001;
onChanged();
return this;
}
private java.lang.Object refAttrName_ = "";
/**
 * if ref_attr_name is not empty, ref_attr_name is the attribute name in parent function.
 * In this case, this AttributeProto does not contain data, and it's a reference of attribute
 * in parent scope.
 * NOTE: This should ONLY be used in function (sub-graph). It's invalid to be used in main graph.
 *
 * optional string ref_attr_name = 21;
 * @return Whether the refAttrName field is set.
 */
public boolean hasRefAttrName() {
  return ((bitField0_ & 0x00000002) != 0);
}
/**
 * if ref_attr_name is not empty, ref_attr_name is the attribute name in parent function.
 * In this case, this AttributeProto does not contain data, and it's a reference of attribute
 * in parent scope.
 * NOTE: This should ONLY be used in function (sub-graph). It's invalid to be used in main graph.
 *
 * optional string ref_attr_name = 21;
 * @return The refAttrName.
 */
// NOTE(review): this accessor was dropped during an HTML scrape (its javadoc had been
// merged into getRefAttrNameBytes'); restored from the standard protoc template.
public java.lang.String getRefAttrName() {
  java.lang.Object ref = refAttrName_;
  if (!(ref instanceof java.lang.String)) {
    com.google.protobuf.ByteString bs =
        (com.google.protobuf.ByteString) ref;
    java.lang.String s = bs.toStringUtf8();
    if (bs.isValidUtf8()) {
      // Cache the decoded String only when it is valid UTF-8, per protoc convention.
      refAttrName_ = s;
    }
    return s;
  } else {
    return (java.lang.String) ref;
  }
}
/**
 * if ref_attr_name is not empty, ref_attr_name is the attribute name in parent function.
 * In this case, this AttributeProto does not contain data, and it's a reference of attribute
 * in parent scope.
 * NOTE: This should ONLY be used in function (sub-graph). It's invalid to be used in main graph.
 *
 * optional string ref_attr_name = 21;
 * @return The bytes for refAttrName.
 */
public com.google.protobuf.ByteString
    getRefAttrNameBytes() {
  java.lang.Object ref = refAttrName_;
  if (ref instanceof String) {
    com.google.protobuf.ByteString b =
        com.google.protobuf.ByteString.copyFromUtf8(
            (java.lang.String) ref);
    refAttrName_ = b;
    return b;
  } else {
    return (com.google.protobuf.ByteString) ref;
  }
}
/**
 * if ref_attr_name is not empty, ref_attr_name is the attribute name in parent function.
 * In this case, this AttributeProto does not contain data, and it's a reference of attribute
 * in parent scope.
 * NOTE: This should ONLY be used in function (sub-graph). It's invalid to be used in main graph.
 *
 * optional string ref_attr_name = 21;
 * @param value The refAttrName to set.
 * @return This builder for chaining.
 */
public Builder setRefAttrName(
    java.lang.String value) {
  if (value == null) { throw new NullPointerException(); }
  refAttrName_ = value;
  bitField0_ |= 0x00000002;
  onChanged();
  return this;
}
/**
 * if ref_attr_name is not empty, ref_attr_name is the attribute name in parent function.
 * In this case, this AttributeProto does not contain data, and it's a reference of attribute
 * in parent scope.
 * NOTE: This should ONLY be used in function (sub-graph). It's invalid to be used in main graph.
 *
 * optional string ref_attr_name = 21;
 * @return This builder for chaining.
 */
// NOTE(review): restored dropped clear method (its javadoc had been merged into
// setRefAttrNameBytes'); standard protoc template for an optional string field.
public Builder clearRefAttrName() {
  refAttrName_ = getDefaultInstance().getRefAttrName();
  bitField0_ = (bitField0_ & ~0x00000002);
  onChanged();
  return this;
}
/**
 * if ref_attr_name is not empty, ref_attr_name is the attribute name in parent function.
 * In this case, this AttributeProto does not contain data, and it's a reference of attribute
 * in parent scope.
 * NOTE: This should ONLY be used in function (sub-graph). It's invalid to be used in main graph.
 *
 * optional string ref_attr_name = 21;
 * @param value The bytes for refAttrName to set.
 * @return This builder for chaining.
 */
public Builder setRefAttrNameBytes(
    com.google.protobuf.ByteString value) {
  if (value == null) { throw new NullPointerException(); }
  refAttrName_ = value;
  bitField0_ |= 0x00000002;
  onChanged();
  return this;
}
private java.lang.Object docString_ = "";
/**
 * A human-readable documentation for this attribute. Markdown is allowed.
 *
 * optional string doc_string = 13;
 * @return Whether the docString field is set.
 */
public boolean hasDocString() {
  return ((bitField0_ & 0x00000004) != 0);
}
/**
 * A human-readable documentation for this attribute. Markdown is allowed.
 *
 * optional string doc_string = 13;
 * @return The docString.
 */
// NOTE(review): getDocString/getDocStringBytes/setDocString/clearDocString were dropped
// during an HTML scrape (their javadoc was merged into setDocStringBytes'); restored
// from the standard protoc template for an optional string field.
public java.lang.String getDocString() {
  java.lang.Object ref = docString_;
  if (!(ref instanceof java.lang.String)) {
    com.google.protobuf.ByteString bs =
        (com.google.protobuf.ByteString) ref;
    java.lang.String s = bs.toStringUtf8();
    if (bs.isValidUtf8()) {
      docString_ = s;
    }
    return s;
  } else {
    return (java.lang.String) ref;
  }
}
/**
 * A human-readable documentation for this attribute. Markdown is allowed.
 *
 * optional string doc_string = 13;
 * @return The bytes for docString.
 */
public com.google.protobuf.ByteString
    getDocStringBytes() {
  java.lang.Object ref = docString_;
  if (ref instanceof String) {
    com.google.protobuf.ByteString b =
        com.google.protobuf.ByteString.copyFromUtf8(
            (java.lang.String) ref);
    docString_ = b;
    return b;
  } else {
    return (com.google.protobuf.ByteString) ref;
  }
}
/**
 * A human-readable documentation for this attribute. Markdown is allowed.
 *
 * optional string doc_string = 13;
 * @param value The docString to set.
 * @return This builder for chaining.
 */
public Builder setDocString(
    java.lang.String value) {
  if (value == null) { throw new NullPointerException(); }
  docString_ = value;
  bitField0_ |= 0x00000004;
  onChanged();
  return this;
}
/**
 * A human-readable documentation for this attribute. Markdown is allowed.
 *
 * optional string doc_string = 13;
 * @return This builder for chaining.
 */
public Builder clearDocString() {
  docString_ = getDefaultInstance().getDocString();
  bitField0_ = (bitField0_ & ~0x00000004);
  onChanged();
  return this;
}
/**
 * A human-readable documentation for this attribute. Markdown is allowed.
 *
 * optional string doc_string = 13;
 * @param value The bytes for docString to set.
 * @return This builder for chaining.
 */
public Builder setDocStringBytes(
    com.google.protobuf.ByteString value) {
  if (value == null) { throw new NullPointerException(); }
  docString_ = value;
  bitField0_ |= 0x00000004;
  onChanged();
  return this;
}
// Stores the numeric wire value of the AttributeType enum; converted lazily in getType().
private int type_ = 0;
/**
 * The type field MUST be present for this version of the IR.
 * For 0.0.1 versions of the IR, this field was not defined, and
 * implementations needed to use has_field heuristics to determine
 * which value field was in use. For IR_VERSION 0.0.2 or later, this
 * field MUST be set and match the f|i|s|t|... field in use. This
 * change was made to accommodate proto3 implementations.
 *
 * optional .onnx.AttributeProto.AttributeType type = 20;
 * @return Whether the type field is set.
 */
@java.lang.Override public boolean hasType() {
  return ((bitField0_ & 0x00000008) != 0);
}
/**
 * The type field MUST be present for this version of the IR.
 * For 0.0.1 versions of the IR, this field was not defined, and
 * implementations needed to use has_field heuristics to determine
 * which value field was in use. For IR_VERSION 0.0.2 or later, this
 * field MUST be set and match the f|i|s|t|... field in use. This
 * change was made to accommodate proto3 implementations.
 *
 * optional .onnx.AttributeProto.AttributeType type = 20;
 * @return The type.
 */
@java.lang.Override
public onnx.Onnx.AttributeProto.AttributeType getType() {
  // Unknown wire values map to UNDEFINED rather than throwing.
  onnx.Onnx.AttributeProto.AttributeType result = onnx.Onnx.AttributeProto.AttributeType.forNumber(type_);
  return result == null ? onnx.Onnx.AttributeProto.AttributeType.UNDEFINED : result;
}
/**
 * The type field MUST be present for this version of the IR.
 * For 0.0.1 versions of the IR, this field was not defined, and
 * implementations needed to use has_field heuristics to determine
 * which value field was in use. For IR_VERSION 0.0.2 or later, this
 * field MUST be set and match the f|i|s|t|... field in use. This
 * change was made to accommodate proto3 implementations.
 *
 * optional .onnx.AttributeProto.AttributeType type = 20;
 * @param value The type to set.
 * @return This builder for chaining.
 */
public Builder setType(onnx.Onnx.AttributeProto.AttributeType value) {
  if (value == null) {
    throw new NullPointerException();
  }
  bitField0_ |= 0x00000008;
  // Only the enum's wire number is stored, never the enum instance itself.
  type_ = value.getNumber();
  onChanged();
  return this;
}
/**
 * The type field MUST be present for this version of the IR.
 * For 0.0.1 versions of the IR, this field was not defined, and
 * implementations needed to use has_field heuristics to determine
 * which value field was in use. For IR_VERSION 0.0.2 or later, this
 * field MUST be set and match the f|i|s|t|... field in use. This
 * change was made to accommodate proto3 implementations.
 *
 * optional .onnx.AttributeProto.AttributeType type = 20;
 * @return This builder for chaining.
 */
public Builder clearType() {
  bitField0_ = (bitField0_ & ~0x00000008);
  type_ = 0;
  onChanged();
  return this;
}
private float f_ ;
/**
 * Exactly ONE of the following fields must be present for this version of the IR
 *
 * optional float f = 2;
 * @return Whether the f field is set.
 */
@java.lang.Override
public boolean hasF() {
  return (bitField0_ & 0x00000010) != 0;
}
/**
 * Exactly ONE of the following fields must be present for this version of the IR
 *
 * optional float f = 2;
 * @return The f.
 */
@java.lang.Override
public float getF() {
  return f_;
}
/**
 * Exactly ONE of the following fields must be present for this version of the IR
 *
 * optional float f = 2;
 * @param value The f to set.
 * @return This builder for chaining.
 */
public Builder setF(float value) {
  bitField0_ |= 0x00000010;
  f_ = value;
  onChanged();
  return this;
}
/**
 * Exactly ONE of the following fields must be present for this version of the IR
 *
 * optional float f = 2;
 * @return This builder for chaining.
 */
public Builder clearF() {
  f_ = 0F;
  bitField0_ &= ~0x00000010;
  onChanged();
  return this;
}
private long i_ ;
/**
 * int
 *
 * optional int64 i = 3;
 * @return Whether the i field is set.
 */
@java.lang.Override
public boolean hasI() {
  return (bitField0_ & 0x00000020) != 0;
}
/**
 * int
 *
 * optional int64 i = 3;
 * @return The i.
 */
@java.lang.Override
public long getI() {
  return i_;
}
/**
 * int
 *
 * optional int64 i = 3;
 * @param value The i to set.
 * @return This builder for chaining.
 */
public Builder setI(long value) {
  bitField0_ |= 0x00000020;
  i_ = value;
  onChanged();
  return this;
}
/**
 * int
 *
 * optional int64 i = 3;
 * @return This builder for chaining.
 */
public Builder clearI() {
  i_ = 0L;
  bitField0_ &= ~0x00000020;
  onChanged();
  return this;
}
private com.google.protobuf.ByteString s_ = com.google.protobuf.ByteString.EMPTY;
/**
 * UTF-8 string
 *
 * optional bytes s = 4;
 * @return Whether the s field is set.
 */
@java.lang.Override
public boolean hasS() {
  return ((bitField0_ & 0x00000040) != 0);
}
/**
 * UTF-8 string
 *
 * optional bytes s = 4;
 * @return The s.
 */
@java.lang.Override
public com.google.protobuf.ByteString getS() {
  return s_;
}
/**
 * UTF-8 string
 *
 * optional bytes s = 4;
 * @param value The s to set.
 * @return This builder for chaining.
 */
public Builder setS(com.google.protobuf.ByteString value) {
  if (value == null) { throw new NullPointerException(); }
  s_ = value;
  bitField0_ |= 0x00000040;
  onChanged();
  return this;
}
/**
 * UTF-8 string
 *
 * optional bytes s = 4;
 * @return This builder for chaining.
 */
// NOTE(review): clearS was dropped during an HTML scrape; restored from the standard
// protoc template for an optional bytes field (resets to the default instance's value).
public Builder clearS() {
  bitField0_ = (bitField0_ & ~0x00000040);
  s_ = getDefaultInstance().getS();
  onChanged();
  return this;
}
// NOTE(review): the t_ / tBuilder_ declarations below were dropped during an HTML scrape —
// the visible hasT/getT/setT reference them — restored from the standard protoc template.
private onnx.Onnx.TensorProto t_;
private com.google.protobuf.SingleFieldBuilderV3<
    onnx.Onnx.TensorProto, onnx.Onnx.TensorProto.Builder, onnx.Onnx.TensorProtoOrBuilder> tBuilder_;
/**
 * tensor value
 *
 * optional .onnx.TensorProto t = 5;
 * @return Whether the t field is set.
 */
public boolean hasT() {
  return ((bitField0_ & 0x00000080) != 0);
}
/**
 * tensor value
 *
 * optional .onnx.TensorProto t = 5;
 * @return The t.
 */
public onnx.Onnx.TensorProto getT() {
  if (tBuilder_ == null) {
    return t_ == null ? onnx.Onnx.TensorProto.getDefaultInstance() : t_;
  } else {
    return tBuilder_.getMessage();
  }
}
/**
 * tensor value
 *
 * optional .onnx.TensorProto t = 5;
 */
public Builder setT(onnx.Onnx.TensorProto value) {
  if (tBuilder_ == null) {
    if (value == null) {
      throw new NullPointerException();
    }
    t_ = value;
  } else {
    tBuilder_.setMessage(value);
  }
  bitField0_ |= 0x00000080;
  onChanged();
  return this;
}
/**
 * tensor value
 *
 * optional .onnx.TensorProto t = 5;
 */
// NOTE(review): mergeT/clearT/getTBuilder/getTOrBuilder/getTFieldBuilder were dropped
// during an HTML scrape; restored from the standard protoc message-field template.
public Builder mergeT(onnx.Onnx.TensorProto value) {
  if (tBuilder_ == null) {
    if (((bitField0_ & 0x00000080) != 0) &&
      t_ != null &&
      t_ != onnx.Onnx.TensorProto.getDefaultInstance()) {
      getTBuilder().mergeFrom(value);
    } else {
      t_ = value;
    }
  } else {
    tBuilder_.mergeFrom(value);
  }
  if (t_ != null) {
    bitField0_ |= 0x00000080;
    onChanged();
  }
  return this;
}
/**
 * tensor value
 *
 * optional .onnx.TensorProto t = 5;
 */
public Builder clearT() {
  bitField0_ = (bitField0_ & ~0x00000080);
  t_ = null;
  if (tBuilder_ != null) {
    tBuilder_.dispose();
    tBuilder_ = null;
  }
  onChanged();
  return this;
}
/**
 * tensor value
 *
 * optional .onnx.TensorProto t = 5;
 */
public onnx.Onnx.TensorProto.Builder getTBuilder() {
  bitField0_ |= 0x00000080;
  onChanged();
  return getTFieldBuilder().getBuilder();
}
/**
 * tensor value
 *
 * optional .onnx.TensorProto t = 5;
 */
public onnx.Onnx.TensorProtoOrBuilder getTOrBuilder() {
  if (tBuilder_ != null) {
    return tBuilder_.getMessageOrBuilder();
  } else {
    return t_ == null ?
        onnx.Onnx.TensorProto.getDefaultInstance() : t_;
  }
}
/**
 * tensor value
 *
 * optional .onnx.TensorProto t = 5;
 */
private com.google.protobuf.SingleFieldBuilderV3<
    onnx.Onnx.TensorProto, onnx.Onnx.TensorProto.Builder, onnx.Onnx.TensorProtoOrBuilder>
    getTFieldBuilder() {
  if (tBuilder_ == null) {
    tBuilder_ = new com.google.protobuf.SingleFieldBuilderV3<
        onnx.Onnx.TensorProto, onnx.Onnx.TensorProto.Builder, onnx.Onnx.TensorProtoOrBuilder>(
            getT(),
            getParentForChildren(),
            isClean());
    t_ = null;
  }
  return tBuilder_;
}
/**
*
*
* repeated float floats = 7;
* @return A list containing the floats.
*/
public java.util.List
getFloatsList() {
floats_.makeImmutable();
return floats_;
}
/**
*
* list of floats
*
*
* repeated float floats = 7;
* @return The count of floats.
*/
public int getFloatsCount() {
return floats_.size();
}
/**
*
* list of floats
*
*
* repeated float floats = 7;
* @param index The index of the element to return.
* @return The floats at the given index.
*/
public float getFloats(int index) {
return floats_.getFloat(index);
}
/**
*
* list of floats
*
*
* repeated float floats = 7;
* @param index The index to set the value at.
* @param value The floats to set.
* @return This builder for chaining.
*/
public Builder setFloats(
int index, float value) {
ensureFloatsIsMutable();
floats_.setFloat(index, value);
bitField0_ |= 0x00000800;
onChanged();
return this;
}
/**
*
* list of floats
*
*
* repeated float floats = 7;
* @param value The floats to add.
* @return This builder for chaining.
*/
public Builder addFloats(float value) {
ensureFloatsIsMutable();
floats_.addFloat(value);
bitField0_ |= 0x00000800;
onChanged();
return this;
}
/**
*
* list of floats
*
*
* repeated float floats = 7;
* @param values The floats to add.
* @return This builder for chaining.
*/
public Builder addAllFloats(
java.lang.Iterable extends java.lang.Float> values) {
ensureFloatsIsMutable();
com.google.protobuf.AbstractMessageLite.Builder.addAll(
values, floats_);
bitField0_ |= 0x00000800;
onChanged();
return this;
}
/**
*
*
* repeated int64 ints = 8;
* @return A list containing the ints.
*/
public java.util.List
getIntsList() {
ints_.makeImmutable();
return ints_;
}
/**
*
* list of ints
*
*
* repeated int64 ints = 8;
* @return The count of ints.
*/
public int getIntsCount() {
return ints_.size();
}
/**
*
* list of ints
*
*
* repeated int64 ints = 8;
* @param index The index of the element to return.
* @return The ints at the given index.
*/
public long getInts(int index) {
return ints_.getLong(index);
}
/**
*
* list of ints
*
*
* repeated int64 ints = 8;
* @param index The index to set the value at.
* @param value The ints to set.
* @return This builder for chaining.
*/
public Builder setInts(
int index, long value) {
ensureIntsIsMutable();
ints_.setLong(index, value);
bitField0_ |= 0x00001000;
onChanged();
return this;
}
/**
*
* list of ints
*
*
* repeated int64 ints = 8;
* @param value The ints to add.
* @return This builder for chaining.
*/
public Builder addInts(long value) {
ensureIntsIsMutable();
ints_.addLong(value);
bitField0_ |= 0x00001000;
onChanged();
return this;
}
/**
*
* list of ints
*
*
* repeated int64 ints = 8;
* @param values The ints to add.
* @return This builder for chaining.
*/
public Builder addAllInts(
java.lang.Iterable extends java.lang.Long> values) {
ensureIntsIsMutable();
com.google.protobuf.AbstractMessageLite.Builder.addAll(
values, ints_);
bitField0_ |= 0x00001000;
onChanged();
return this;
}
/**
*
*
* repeated bytes strings = 9;
* @return A list containing the strings.
*/
public java.util.List
getStringsList() {
strings_.makeImmutable();
return strings_;
}
/**
*
* list of UTF-8 strings
*
*
* repeated bytes strings = 9;
* @return The count of strings.
*/
public int getStringsCount() {
return strings_.size();
}
/**
*
* list of UTF-8 strings
*
*
* repeated bytes strings = 9;
* @param index The index of the element to return.
* @return The strings at the given index.
*/
public com.google.protobuf.ByteString getStrings(int index) {
return strings_.get(index);
}
/**
*
* list of UTF-8 strings
*
*
* repeated bytes strings = 9;
* @param index The index to set the value at.
* @param value The strings to set.
* @return This builder for chaining.
*/
public Builder setStrings(
int index, com.google.protobuf.ByteString value) {
if (value == null) { throw new NullPointerException(); }
ensureStringsIsMutable();
strings_.set(index, value);
bitField0_ |= 0x00002000;
onChanged();
return this;
}
/**
*
* list of UTF-8 strings
*
*
* repeated bytes strings = 9;
* @param value The strings to add.
* @return This builder for chaining.
*/
public Builder addStrings(com.google.protobuf.ByteString value) {
if (value == null) { throw new NullPointerException(); }
ensureStringsIsMutable();
strings_.add(value);
bitField0_ |= 0x00002000;
onChanged();
return this;
}
/**
*
* list of UTF-8 strings
*
*
* repeated bytes strings = 9;
* @param values The strings to add.
* @return This builder for chaining.
*/
public Builder addAllStrings(
java.lang.Iterable extends com.google.protobuf.ByteString> values) {
ensureStringsIsMutable();
com.google.protobuf.AbstractMessageLite.Builder.addAll(
values, strings_);
bitField0_ |= 0x00002000;
onChanged();
return this;
}
/**
*
* A human-readable documentation for this value. Markdown is allowed.
*
*
* optional string doc_string = 3;
* @return The bytes for docString.
*/
com.google.protobuf.ByteString
getDocStringBytes();
}
/**
*
* Defines information on value, including the name, the type, and
* the shape of the value.
*
*
* Protobuf type {@code onnx.ValueInfoProto}
*/
public static final class ValueInfoProto extends
com.google.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:onnx.ValueInfoProto)
ValueInfoProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use ValueInfoProto.newBuilder() to construct.
private ValueInfoProto(com.google.protobuf.GeneratedMessageV3.Builder> builder) {
super(builder);
}
private ValueInfoProto() {
name_ = "";
docString_ = "";
}
@java.lang.Override
@SuppressWarnings({"unused"})
protected java.lang.Object newInstance(
UnusedPrivateParameter unused) {
return new ValueInfoProto();
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return onnx.Onnx.internal_static_onnx_ValueInfoProto_descriptor;
}
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return onnx.Onnx.internal_static_onnx_ValueInfoProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
onnx.Onnx.ValueInfoProto.class, onnx.Onnx.ValueInfoProto.Builder.class);
}
private int bitField0_;
public static final int NAME_FIELD_NUMBER = 1;
@SuppressWarnings("serial")
private volatile java.lang.Object name_ = "";
/**
*
* This field MUST be present in this version of the IR.
*
*
* optional string name = 1;
* @return Whether the name field is set.
*/
@java.lang.Override
public boolean hasName() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
*
* This field MUST be present in this version of the IR.
*
*
* optional string name = 1;
* @return The name.
*/
@java.lang.Override
public java.lang.String getName() {
java.lang.Object ref = name_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
name_ = s;
}
return s;
}
}
/**
*
* This field MUST be present in this version of the IR.
*
*
* optional string name = 1;
* @return The bytes for name.
*/
@java.lang.Override
public com.google.protobuf.ByteString
getNameBytes() {
java.lang.Object ref = name_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
name_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
public static final int TYPE_FIELD_NUMBER = 2;
private onnx.Onnx.TypeProto type_;
/**
*
* This field MUST be present in this version of the IR for
* inputs and outputs of the top-level graph.
*
*
* optional .onnx.TypeProto type = 2;
* @return Whether the type field is set.
*/
@java.lang.Override
public boolean hasType() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
*
* This field MUST be present in this version of the IR for
* inputs and outputs of the top-level graph.
*
*
* optional .onnx.TypeProto type = 2;
* @return The type.
*/
@java.lang.Override
public onnx.Onnx.TypeProto getType() {
return type_ == null ? onnx.Onnx.TypeProto.getDefaultInstance() : type_;
}
/**
*
* This field MUST be present in this version of the IR for
* inputs and outputs of the top-level graph.
*
*
* optional .onnx.TypeProto type = 2;
*/
@java.lang.Override
public onnx.Onnx.TypeProtoOrBuilder getTypeOrBuilder() {
return type_ == null ? onnx.Onnx.TypeProto.getDefaultInstance() : type_;
}
public static final int DOC_STRING_FIELD_NUMBER = 3;
@SuppressWarnings("serial")
private volatile java.lang.Object docString_ = "";
/**
*
* A human-readable documentation for this value. Markdown is allowed.
*
*
* optional string doc_string = 3;
* @return Whether the docString field is set.
*/
@java.lang.Override
public boolean hasDocString() {
return ((bitField0_ & 0x00000004) != 0);
}
/**
*
* A human-readable documentation for this value. Markdown is allowed.
*
* A human-readable documentation for this value. Markdown is allowed.
*
*
* optional string doc_string = 3;
* @return The bytes for docString.
*/
@java.lang.Override
public com.google.protobuf.ByteString
getDocStringBytes() {
java.lang.Object ref = docString_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
docString_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (((bitField0_ & 0x00000001) != 0)) {
com.google.protobuf.GeneratedMessageV3.writeString(output, 1, name_);
}
if (((bitField0_ & 0x00000002) != 0)) {
output.writeMessage(2, getType());
}
if (((bitField0_ & 0x00000004) != 0)) {
com.google.protobuf.GeneratedMessageV3.writeString(output, 3, docString_);
}
getUnknownFields().writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) != 0)) {
size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, name_);
}
if (((bitField0_ & 0x00000002) != 0)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(2, getType());
}
if (((bitField0_ & 0x00000004) != 0)) {
size += com.google.protobuf.GeneratedMessageV3.computeStringSize(3, docString_);
}
size += getUnknownFields().getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof onnx.Onnx.ValueInfoProto)) {
return super.equals(obj);
}
onnx.Onnx.ValueInfoProto other = (onnx.Onnx.ValueInfoProto) obj;
if (hasName() != other.hasName()) return false;
if (hasName()) {
if (!getName()
.equals(other.getName())) return false;
}
if (hasType() != other.hasType()) return false;
if (hasType()) {
if (!getType()
.equals(other.getType())) return false;
}
if (hasDocString() != other.hasDocString()) return false;
if (hasDocString()) {
if (!getDocString()
.equals(other.getDocString())) return false;
}
if (!getUnknownFields().equals(other.getUnknownFields())) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (hasName()) {
hash = (37 * hash) + NAME_FIELD_NUMBER;
hash = (53 * hash) + getName().hashCode();
}
if (hasType()) {
hash = (37 * hash) + TYPE_FIELD_NUMBER;
hash = (53 * hash) + getType().hashCode();
}
if (hasDocString()) {
hash = (37 * hash) + DOC_STRING_FIELD_NUMBER;
hash = (53 * hash) + getDocString().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static onnx.Onnx.ValueInfoProto parseFrom(
java.nio.ByteBuffer data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static onnx.Onnx.ValueInfoProto parseFrom(
java.nio.ByteBuffer data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static onnx.Onnx.ValueInfoProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static onnx.Onnx.ValueInfoProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static onnx.Onnx.ValueInfoProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static onnx.Onnx.ValueInfoProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static onnx.Onnx.ValueInfoProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static onnx.Onnx.ValueInfoProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static onnx.Onnx.ValueInfoProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static onnx.Onnx.ValueInfoProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static onnx.Onnx.ValueInfoProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static onnx.Onnx.ValueInfoProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(onnx.Onnx.ValueInfoProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
*
* Defines information on value, including the name, the type, and
* the shape of the value.
*
*
* Protobuf type {@code onnx.ValueInfoProto}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessageV3.Builder implements
// @@protoc_insertion_point(builder_implements:onnx.ValueInfoProto)
onnx.Onnx.ValueInfoProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return onnx.Onnx.internal_static_onnx_ValueInfoProto_descriptor;
}
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return onnx.Onnx.internal_static_onnx_ValueInfoProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
onnx.Onnx.ValueInfoProto.class, onnx.Onnx.ValueInfoProto.Builder.class);
}
// Construct using onnx.Onnx.ValueInfoProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
getTypeFieldBuilder();
}
}
@java.lang.Override
public Builder clear() {
super.clear();
bitField0_ = 0;
name_ = "";
type_ = null;
if (typeBuilder_ != null) {
typeBuilder_.dispose();
typeBuilder_ = null;
}
docString_ = "";
return this;
}
@java.lang.Override
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return onnx.Onnx.internal_static_onnx_ValueInfoProto_descriptor;
}
@java.lang.Override
public onnx.Onnx.ValueInfoProto getDefaultInstanceForType() {
return onnx.Onnx.ValueInfoProto.getDefaultInstance();
}
@java.lang.Override
public onnx.Onnx.ValueInfoProto build() {
onnx.Onnx.ValueInfoProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public onnx.Onnx.ValueInfoProto buildPartial() {
onnx.Onnx.ValueInfoProto result = new onnx.Onnx.ValueInfoProto(this);
if (bitField0_ != 0) { buildPartial0(result); }
onBuilt();
return result;
}
private void buildPartial0(onnx.Onnx.ValueInfoProto result) {
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) != 0)) {
result.name_ = name_;
to_bitField0_ |= 0x00000001;
}
if (((from_bitField0_ & 0x00000002) != 0)) {
result.type_ = typeBuilder_ == null
? type_
: typeBuilder_.build();
to_bitField0_ |= 0x00000002;
}
if (((from_bitField0_ & 0x00000004) != 0)) {
result.docString_ = docString_;
to_bitField0_ |= 0x00000004;
}
result.bitField0_ |= to_bitField0_;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
com.google.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
com.google.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
com.google.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
com.google.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
com.google.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof onnx.Onnx.ValueInfoProto) {
return mergeFrom((onnx.Onnx.ValueInfoProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(onnx.Onnx.ValueInfoProto other) {
if (other == onnx.Onnx.ValueInfoProto.getDefaultInstance()) return this;
if (other.hasName()) {
name_ = other.name_;
bitField0_ |= 0x00000001;
onChanged();
}
if (other.hasType()) {
mergeType(other.getType());
}
if (other.hasDocString()) {
docString_ = other.docString_;
bitField0_ |= 0x00000004;
onChanged();
}
this.mergeUnknownFields(other.getUnknownFields());
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
return true;
}
@java.lang.Override
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 10: {
name_ = input.readBytes();
bitField0_ |= 0x00000001;
break;
} // case 10
case 18: {
input.readMessage(
getTypeFieldBuilder().getBuilder(),
extensionRegistry);
bitField0_ |= 0x00000002;
break;
} // case 18
case 26: {
docString_ = input.readBytes();
bitField0_ |= 0x00000004;
break;
} // case 26
default: {
if (!super.parseUnknownField(input, extensionRegistry, tag)) {
done = true; // was an endgroup tag
}
break;
} // default:
} // switch (tag)
} // while (!done)
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.unwrapIOException();
} finally {
onChanged();
} // finally
return this;
}
private int bitField0_;
private java.lang.Object name_ = "";
/**
*
* This field MUST be present in this version of the IR.
*
*
* optional string name = 1;
* @return Whether the name field is set.
*/
public boolean hasName() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
*
* This field MUST be present in this version of the IR.
*
*
* optional string name = 1;
* @return The name.
*/
public java.lang.String getName() {
java.lang.Object ref = name_;
if (!(ref instanceof java.lang.String)) {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
name_ = s;
}
return s;
} else {
return (java.lang.String) ref;
}
}
/**
*
* This field MUST be present in this version of the IR.
*
*
* optional string name = 1;
* @return The bytes for name.
*/
public com.google.protobuf.ByteString
getNameBytes() {
java.lang.Object ref = name_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
name_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
*
* This field MUST be present in this version of the IR.
*
*
* optional string name = 1;
* @param value The name to set.
* @return This builder for chaining.
*/
public Builder setName(
java.lang.String value) {
if (value == null) { throw new NullPointerException(); }
name_ = value;
bitField0_ |= 0x00000001;
onChanged();
return this;
}
/**
*
* This field MUST be present in this version of the IR.
*
*
* optional string name = 1;
* @return This builder for chaining.
*/
public Builder clearName() {
name_ = getDefaultInstance().getName();
bitField0_ = (bitField0_ & ~0x00000001);
onChanged();
return this;
}
/**
*
* This field MUST be present in this version of the IR.
*
*
* optional string name = 1;
* @param value The bytes for name to set.
* @return This builder for chaining.
*/
public Builder setNameBytes(
com.google.protobuf.ByteString value) {
if (value == null) { throw new NullPointerException(); }
name_ = value;
bitField0_ |= 0x00000001;
onChanged();
return this;
}
private onnx.Onnx.TypeProto type_;
private com.google.protobuf.SingleFieldBuilderV3<
onnx.Onnx.TypeProto, onnx.Onnx.TypeProto.Builder, onnx.Onnx.TypeProtoOrBuilder> typeBuilder_;
/**
*
* This field MUST be present in this version of the IR for
* inputs and outputs of the top-level graph.
*
*
* optional .onnx.TypeProto type = 2;
* @return Whether the type field is set.
*/
public boolean hasType() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
*
* This field MUST be present in this version of the IR for
* inputs and outputs of the top-level graph.
*
*
* optional .onnx.TypeProto type = 2;
* @return The type.
*/
public onnx.Onnx.TypeProto getType() {
if (typeBuilder_ == null) {
return type_ == null ? onnx.Onnx.TypeProto.getDefaultInstance() : type_;
} else {
return typeBuilder_.getMessage();
}
}
/**
*
* This field MUST be present in this version of the IR for
* inputs and outputs of the top-level graph.
*
*
* optional .onnx.TypeProto type = 2;
*/
public Builder setType(onnx.Onnx.TypeProto value) {
if (typeBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
type_ = value;
} else {
typeBuilder_.setMessage(value);
}
bitField0_ |= 0x00000002;
onChanged();
return this;
}
/**
*
* This field MUST be present in this version of the IR for
* inputs and outputs of the top-level graph.
*
* A human-readable documentation for this value. Markdown is allowed.
*
*
* optional string doc_string = 3;
* @param value The bytes for docString to set.
* @return This builder for chaining.
*/
public Builder setDocStringBytes(
com.google.protobuf.ByteString value) {
if (value == null) { throw new NullPointerException(); }
docString_ = value;
bitField0_ |= 0x00000004;
onChanged();
return this;
}
// Unknown-field handling delegates to the generated superclass, which stores
// unrecognized tags so they survive a parse/serialize round trip.
@java.lang.Override
public final Builder setUnknownFields(
    final com.google.protobuf.UnknownFieldSet unknownFields) {
  return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
    final com.google.protobuf.UnknownFieldSet unknownFields) {
  return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:onnx.ValueInfoProto)
}
// @@protoc_insertion_point(class_scope:onnx.ValueInfoProto)
// Singleton default instance: all fields carry their proto default values.
// Unset message-typed fields elsewhere resolve to this object.
private static final onnx.Onnx.ValueInfoProto DEFAULT_INSTANCE;
static {
  DEFAULT_INSTANCE = new onnx.Onnx.ValueInfoProto();
}
public static onnx.Onnx.ValueInfoProto getDefaultInstance() {
  return DEFAULT_INSTANCE;
}
/**
 * Shared parser for {@code ValueInfoProto}.
 * The generic type arguments were lost in this copy (raw {@code Parser} /
 * {@code AbstractParser}); restored here to match the generated signature.
 *
 * @deprecated prefer {@link #parser()} over direct field access.
 */
@java.lang.Deprecated public static final com.google.protobuf.Parser<ValueInfoProto>
    PARSER = new com.google.protobuf.AbstractParser<ValueInfoProto>() {
  @java.lang.Override
  public ValueInfoProto parsePartialFrom(
      com.google.protobuf.CodedInputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    Builder builder = newBuilder();
    try {
      builder.mergeFrom(input, extensionRegistry);
    } catch (com.google.protobuf.InvalidProtocolBufferException e) {
      // Attach whatever was parsed so far so callers can inspect partial data.
      throw e.setUnfinishedMessage(builder.buildPartial());
    } catch (com.google.protobuf.UninitializedMessageException e) {
      throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
    } catch (java.io.IOException e) {
      // Wrap plain IO failures in the protobuf exception type callers expect.
      throw new com.google.protobuf.InvalidProtocolBufferException(e)
          .setUnfinishedMessage(builder.buildPartial());
    }
    return builder.buildPartial();
  }
};
/** Static parser accessor; restored the lost {@code <ValueInfoProto>} type argument. */
public static com.google.protobuf.Parser<ValueInfoProto> parser() {
  return PARSER;
}
@java.lang.Override
public com.google.protobuf.Parser<ValueInfoProto> getParserForType() {
  return PARSER;
}
// Instance-level accessor for the shared default instance.
@java.lang.Override
public onnx.Onnx.ValueInfoProto getDefaultInstanceForType() {
  return DEFAULT_INSTANCE;
}
}
public interface NodeProtoOrBuilder extends
    // @@protoc_insertion_point(interface_extends:onnx.NodeProto)
    com.google.protobuf.MessageOrBuilder {
  /**
   * namespace Value
   *
   * repeated string input = 1;
   * @return A list containing the input.
   */
  java.util.List<java.lang.String>
      getInputList();
  /**
   * namespace Value
   *
   * repeated string input = 1;
   * @return The count of input.
   */
  int getInputCount();
  /**
   * namespace Value
   *
   * repeated string input = 1;
   * @param index The index of the element to return.
   * @return The input at the given index.
   */
  java.lang.String getInput(int index);
  /**
   * namespace Value
   *
   * repeated string input = 1;
   * @param index The index of the value to return.
   * @return The bytes of the input at the given index.
   */
  com.google.protobuf.ByteString
      getInputBytes(int index);
  /**
   * namespace Value
   *
   * repeated string output = 2;
   * @return A list containing the output.
   */
  java.util.List<java.lang.String>
      getOutputList();
  /**
   * namespace Value
   *
   * repeated string output = 2;
   * @return The count of output.
   */
  int getOutputCount();
  /**
   * namespace Value
   *
   * repeated string output = 2;
   * @param index The index of the element to return.
   * @return The output at the given index.
   */
  java.lang.String getOutput(int index);
  /**
   * namespace Value
   *
   * repeated string output = 2;
   * @param index The index of the value to return.
   * @return The bytes of the output at the given index.
   */
  com.google.protobuf.ByteString
      getOutputBytes(int index);
  /**
   * An optional identifier for this node in a graph.
   * This field MAY be absent in this version of the IR.
   *
   * optional string name = 3;
   * @return Whether the name field is set.
   */
  boolean hasName();
  /**
   * An optional identifier for this node in a graph.
   * This field MAY be absent in this version of the IR.
   *
   * optional string name = 3;
   * @return The name.
   */
  java.lang.String getName();
  /**
   * An optional identifier for this node in a graph.
   * This field MAY be absent in this version of the IR.
   *
   * optional string name = 3;
   * @return The bytes for name.
   */
  com.google.protobuf.ByteString
      getNameBytes();
  /**
   * The symbolic identifier of the Operator to execute.
   *
   * optional string op_type = 4;
   * @return Whether the opType field is set.
   */
  boolean hasOpType();
  /**
   * A human-readable documentation for this node. Markdown is allowed.
   *
   * optional string doc_string = 6;
   * @return The bytes for docString.
   */
  com.google.protobuf.ByteString
      getDocStringBytes();
}
/**
*
* Nodes
*
* Computation graphs are made up of a DAG of nodes, which represent what is
* commonly called a "layer" or "pipeline stage" in machine learning frameworks.
*
* For example, it can be a node of type "Conv" that takes in an image, a filter
* tensor and a bias tensor, and produces the convolved output.
*
*
* Protobuf type {@code onnx.NodeProto}
*/
public static final class NodeProto extends
com.google.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:onnx.NodeProto)
NodeProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use NodeProto.newBuilder() to construct.
// Restored the wildcard bound lost in this copy: `Builder>` does not compile.
private NodeProto(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
  super(builder);
}
// Zero-arg constructor used for the default instance: every field takes its
// proto default (empty lists, empty strings).
private NodeProto() {
  input_ =
      com.google.protobuf.LazyStringArrayList.emptyList();
  output_ =
      com.google.protobuf.LazyStringArrayList.emptyList();
  name_ = "";
  opType_ = "";
  domain_ = "";
  attribute_ = java.util.Collections.emptyList();
  docString_ = "";
}
// Reflection hook used by the runtime to create fresh instances.
@java.lang.Override
@SuppressWarnings({"unused"})
protected java.lang.Object newInstance(
    UnusedPrivateParameter unused) {
  return new NodeProto();
}
public static final com.google.protobuf.Descriptors.Descriptor
    getDescriptor() {
  return onnx.Onnx.internal_static_onnx_NodeProto_descriptor;
}
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
    internalGetFieldAccessorTable() {
  return onnx.Onnx.internal_static_onnx_NodeProto_fieldAccessorTable
      .ensureFieldAccessorsInitialized(
          onnx.Onnx.NodeProto.class, onnx.Onnx.NodeProto.Builder.class);
}
// Presence bits for the optional scalar fields of this message
// (0x1 name, 0x2 op_type, 0x4 domain, 0x8 doc_string — see writeTo).
private int bitField0_;
public static final int INPUT_FIELD_NUMBER = 1;
@SuppressWarnings("serial")
private com.google.protobuf.LazyStringArrayList input_ =
    com.google.protobuf.LazyStringArrayList.emptyList();
/**
 * namespace Value
 *
 * repeated string input = 1;
 * @return A list containing the input.
 */
public com.google.protobuf.ProtocolStringList
    getInputList() {
  return input_;
}
/**
 * namespace Value
 *
 * repeated string input = 1;
 * @return The count of input.
 */
public int getInputCount() {
  return input_.size();
}
/**
 * namespace Value
 *
 * repeated string input = 1;
 * @param index The index of the element to return.
 * @return The input at the given index.
 */
public java.lang.String getInput(int index) {
  return input_.get(index);
}
/**
 * namespace Value
 *
 * repeated string input = 1;
 * @param index The index of the value to return.
 * @return The bytes of the input at the given index.
 */
public com.google.protobuf.ByteString
    getInputBytes(int index) {
  return input_.getByteString(index);
}
public static final int OUTPUT_FIELD_NUMBER = 2;
@SuppressWarnings("serial")
private com.google.protobuf.LazyStringArrayList output_ =
    com.google.protobuf.LazyStringArrayList.emptyList();
/**
 * namespace Value
 *
 * repeated string output = 2;
 * @return A list containing the output.
 */
public com.google.protobuf.ProtocolStringList
    getOutputList() {
  return output_;
}
/**
 * namespace Value
 *
 * repeated string output = 2;
 * @return The count of output.
 */
public int getOutputCount() {
  return output_.size();
}
/**
 * namespace Value
 *
 * repeated string output = 2;
 * @param index The index of the element to return.
 * @return The output at the given index.
 */
public java.lang.String getOutput(int index) {
  return output_.get(index);
}
/**
 * namespace Value
 *
 * repeated string output = 2;
 * @param index The index of the value to return.
 * @return The bytes of the output at the given index.
 */
public com.google.protobuf.ByteString
    getOutputBytes(int index) {
  return output_.getByteString(index);
}
public static final int NAME_FIELD_NUMBER = 3;
@SuppressWarnings("serial")
// Holds a String once decoded, or the original ByteString before decoding.
private volatile java.lang.Object name_ = "";
/**
 * An optional identifier for this node in a graph.
 * This field MAY be absent in this version of the IR.
 *
 * optional string name = 3;
 * @return Whether the name field is set.
 */
@java.lang.Override
public boolean hasName() {
  return ((bitField0_ & 0x00000001) != 0);
}
/**
 * An optional identifier for this node in a graph.
 * This field MAY be absent in this version of the IR.
 *
 * optional string name = 3;
 * @return The name.
 */
@java.lang.Override
public java.lang.String getName() {
  java.lang.Object ref = name_;
  if (ref instanceof java.lang.String) {
    return (java.lang.String) ref;
  } else {
    com.google.protobuf.ByteString bs =
        (com.google.protobuf.ByteString) ref;
    java.lang.String s = bs.toStringUtf8();
    // Cache the decoded String only when the bytes are valid UTF-8.
    if (bs.isValidUtf8()) {
      name_ = s;
    }
    return s;
  }
}
/**
 * An optional identifier for this node in a graph.
 * This field MAY be absent in this version of the IR.
 *
 * optional string name = 3;
 * @return The bytes for name.
 */
@java.lang.Override
public com.google.protobuf.ByteString
    getNameBytes() {
  java.lang.Object ref = name_;
  if (ref instanceof java.lang.String) {
    com.google.protobuf.ByteString b =
        com.google.protobuf.ByteString.copyFromUtf8(
            (java.lang.String) ref);
    // Cache the encoded form so repeated calls avoid re-encoding.
    name_ = b;
    return b;
  } else {
    return (com.google.protobuf.ByteString) ref;
  }
}
public static final int OP_TYPE_FIELD_NUMBER = 4;
@SuppressWarnings("serial")
// Holds a String once decoded, or the original ByteString before decoding.
private volatile java.lang.Object opType_ = "";
/**
 * The symbolic identifier of the Operator to execute.
 *
 * optional string op_type = 4;
 * @return Whether the opType field is set.
 */
@java.lang.Override
public boolean hasOpType() {
  return ((bitField0_ & 0x00000002) != 0);
}
/**
 * A human-readable documentation for this node. Markdown is allowed.
 *
 * optional string doc_string = 6;
 * @return The bytes for docString.
 */
@java.lang.Override
public com.google.protobuf.ByteString
    getDocStringBytes() {
  java.lang.Object ref = docString_;
  if (ref instanceof java.lang.String) {
    com.google.protobuf.ByteString b =
        com.google.protobuf.ByteString.copyFromUtf8(
            (java.lang.String) ref);
    // Cache the encoded form so repeated calls avoid re-encoding.
    docString_ = b;
    return b;
  } else {
    return (com.google.protobuf.ByteString) ref;
  }
}
// Memoized initialization state: -1 unknown, 0 not initialized, 1 initialized.
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
  byte isInitialized = memoizedIsInitialized;
  if (isInitialized == 1) return true;
  if (isInitialized == 0) return false;
  // NodeProto has no required fields, so it is always initialized.
  memoizedIsInitialized = 1;
  return true;
}
@java.lang.Override
public void writeTo(com.google.protobuf.CodedOutputStream output)
    throws java.io.IOException {
  // Fields are emitted in ascending field-number order: 1 input, 2 output,
  // 3 name, 4 op_type, 5 attribute, 6 doc_string, 7 domain.
  for (int i = 0; i < input_.size(); i++) {
    com.google.protobuf.GeneratedMessageV3.writeString(output, 1, input_.getRaw(i));
  }
  for (int i = 0; i < output_.size(); i++) {
    com.google.protobuf.GeneratedMessageV3.writeString(output, 2, output_.getRaw(i));
  }
  if (((bitField0_ & 0x00000001) != 0)) {
    com.google.protobuf.GeneratedMessageV3.writeString(output, 3, name_);
  }
  if (((bitField0_ & 0x00000002) != 0)) {
    com.google.protobuf.GeneratedMessageV3.writeString(output, 4, opType_);
  }
  for (int i = 0; i < attribute_.size(); i++) {
    output.writeMessage(5, attribute_.get(i));
  }
  // Note: doc_string carries bit 0x8 and domain bit 0x4 — the bit order does
  // not follow field-number order, but the write order below does.
  if (((bitField0_ & 0x00000008) != 0)) {
    com.google.protobuf.GeneratedMessageV3.writeString(output, 6, docString_);
  }
  if (((bitField0_ & 0x00000004) != 0)) {
    com.google.protobuf.GeneratedMessageV3.writeString(output, 7, domain_);
  }
  getUnknownFields().writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
  // Memoized: -1 means "not yet computed".
  int size = memoizedSize;
  if (size != -1) return size;
  size = 0;
  {
    int dataSize = 0;
    for (int i = 0; i < input_.size(); i++) {
      dataSize += computeStringSizeNoTag(input_.getRaw(i));
    }
    size += dataSize;
    // One-byte tag per element (field numbers 1 and 2 fit in a single byte).
    size += 1 * getInputList().size();
  }
  {
    int dataSize = 0;
    for (int i = 0; i < output_.size(); i++) {
      dataSize += computeStringSizeNoTag(output_.getRaw(i));
    }
    size += dataSize;
    size += 1 * getOutputList().size();
  }
  if (((bitField0_ & 0x00000001) != 0)) {
    size += com.google.protobuf.GeneratedMessageV3.computeStringSize(3, name_);
  }
  if (((bitField0_ & 0x00000002) != 0)) {
    size += com.google.protobuf.GeneratedMessageV3.computeStringSize(4, opType_);
  }
  for (int i = 0; i < attribute_.size(); i++) {
    size += com.google.protobuf.CodedOutputStream
        .computeMessageSize(5, attribute_.get(i));
  }
  if (((bitField0_ & 0x00000008) != 0)) {
    size += com.google.protobuf.GeneratedMessageV3.computeStringSize(6, docString_);
  }
  if (((bitField0_ & 0x00000004) != 0)) {
    size += com.google.protobuf.GeneratedMessageV3.computeStringSize(7, domain_);
  }
  size += getUnknownFields().getSerializedSize();
  memoizedSize = size;
  return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
  if (obj == this) {
    return true;
  }
  if (!(obj instanceof onnx.Onnx.NodeProto)) {
    return super.equals(obj);
  }
  onnx.Onnx.NodeProto other = (onnx.Onnx.NodeProto) obj;
  // Field-by-field comparison; for optional fields presence is compared
  // before the value so "unset" never equals "set to default".
  if (!getInputList()
      .equals(other.getInputList())) return false;
  if (!getOutputList()
      .equals(other.getOutputList())) return false;
  if (hasName() != other.hasName()) return false;
  if (hasName()) {
    if (!getName()
        .equals(other.getName())) return false;
  }
  if (hasOpType() != other.hasOpType()) return false;
  if (hasOpType()) {
    if (!getOpType()
        .equals(other.getOpType())) return false;
  }
  if (hasDomain() != other.hasDomain()) return false;
  if (hasDomain()) {
    if (!getDomain()
        .equals(other.getDomain())) return false;
  }
  if (!getAttributeList()
      .equals(other.getAttributeList())) return false;
  if (hasDocString() != other.hasDocString()) return false;
  if (hasDocString()) {
    if (!getDocString()
        .equals(other.getDocString())) return false;
  }
  // Unknown fields participate in equality too.
  if (!getUnknownFields().equals(other.getUnknownFields())) return false;
  return true;
}
@java.lang.Override
public int hashCode() {
  // Memoized: 0 means "not yet computed" (the scheme never produces 0 here).
  if (memoizedHashCode != 0) {
    return memoizedHashCode;
  }
  // Standard generated scheme: mix each present field's number and value.
  int hash = 41;
  hash = (19 * hash) + getDescriptor().hashCode();
  if (getInputCount() > 0) {
    hash = (37 * hash) + INPUT_FIELD_NUMBER;
    hash = (53 * hash) + getInputList().hashCode();
  }
  if (getOutputCount() > 0) {
    hash = (37 * hash) + OUTPUT_FIELD_NUMBER;
    hash = (53 * hash) + getOutputList().hashCode();
  }
  if (hasName()) {
    hash = (37 * hash) + NAME_FIELD_NUMBER;
    hash = (53 * hash) + getName().hashCode();
  }
  if (hasOpType()) {
    hash = (37 * hash) + OP_TYPE_FIELD_NUMBER;
    hash = (53 * hash) + getOpType().hashCode();
  }
  if (hasDomain()) {
    hash = (37 * hash) + DOMAIN_FIELD_NUMBER;
    hash = (53 * hash) + getDomain().hashCode();
  }
  if (getAttributeCount() > 0) {
    hash = (37 * hash) + ATTRIBUTE_FIELD_NUMBER;
    hash = (53 * hash) + getAttributeList().hashCode();
  }
  if (hasDocString()) {
    hash = (37 * hash) + DOC_STRING_FIELD_NUMBER;
    hash = (53 * hash) + getDocString().hashCode();
  }
  hash = (29 * hash) + getUnknownFields().hashCode();
  memoizedHashCode = hash;
  return hash;
}
// Standard generated parse entry points. All delegate to PARSER or to the
// GeneratedMessageV3 IO helpers, which normalize IOException handling.
public static onnx.Onnx.NodeProto parseFrom(
    java.nio.ByteBuffer data)
    throws com.google.protobuf.InvalidProtocolBufferException {
  return PARSER.parseFrom(data);
}
public static onnx.Onnx.NodeProto parseFrom(
    java.nio.ByteBuffer data,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws com.google.protobuf.InvalidProtocolBufferException {
  return PARSER.parseFrom(data, extensionRegistry);
}
public static onnx.Onnx.NodeProto parseFrom(
    com.google.protobuf.ByteString data)
    throws com.google.protobuf.InvalidProtocolBufferException {
  return PARSER.parseFrom(data);
}
public static onnx.Onnx.NodeProto parseFrom(
    com.google.protobuf.ByteString data,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws com.google.protobuf.InvalidProtocolBufferException {
  return PARSER.parseFrom(data, extensionRegistry);
}
public static onnx.Onnx.NodeProto parseFrom(byte[] data)
    throws com.google.protobuf.InvalidProtocolBufferException {
  return PARSER.parseFrom(data);
}
public static onnx.Onnx.NodeProto parseFrom(
    byte[] data,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws com.google.protobuf.InvalidProtocolBufferException {
  return PARSER.parseFrom(data, extensionRegistry);
}
public static onnx.Onnx.NodeProto parseFrom(java.io.InputStream input)
    throws java.io.IOException {
  return com.google.protobuf.GeneratedMessageV3
      .parseWithIOException(PARSER, input);
}
public static onnx.Onnx.NodeProto parseFrom(
    java.io.InputStream input,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws java.io.IOException {
  return com.google.protobuf.GeneratedMessageV3
      .parseWithIOException(PARSER, input, extensionRegistry);
}
// Delimited variants read a varint length prefix before the message bytes.
public static onnx.Onnx.NodeProto parseDelimitedFrom(java.io.InputStream input)
    throws java.io.IOException {
  return com.google.protobuf.GeneratedMessageV3
      .parseDelimitedWithIOException(PARSER, input);
}
public static onnx.Onnx.NodeProto parseDelimitedFrom(
    java.io.InputStream input,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws java.io.IOException {
  return com.google.protobuf.GeneratedMessageV3
      .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static onnx.Onnx.NodeProto parseFrom(
    com.google.protobuf.CodedInputStream input)
    throws java.io.IOException {
  return com.google.protobuf.GeneratedMessageV3
      .parseWithIOException(PARSER, input);
}
public static onnx.Onnx.NodeProto parseFrom(
    com.google.protobuf.CodedInputStream input,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws java.io.IOException {
  return com.google.protobuf.GeneratedMessageV3
      .parseWithIOException(PARSER, input, extensionRegistry);
}
// Builder factories.
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
  return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(onnx.Onnx.NodeProto prototype) {
  return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
  // Skip the mergeFrom when this is the default instance (nothing to copy).
  return this == DEFAULT_INSTANCE
      ? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
    com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
  Builder builder = new Builder(parent);
  return builder;
}
/**
*
* Nodes
*
* Computation graphs are made up of a DAG of nodes, which represent what is
* commonly called a "layer" or "pipeline stage" in machine learning frameworks.
*
* For example, it can be a node of type "Conv" that takes in an image, a filter
* tensor and a bias tensor, and produces the convolved output.
*
*
* Protobuf type {@code onnx.NodeProto}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessageV3.Builder implements
// @@protoc_insertion_point(builder_implements:onnx.NodeProto)
onnx.Onnx.NodeProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
    getDescriptor() {
  return onnx.Onnx.internal_static_onnx_NodeProto_descriptor;
}
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
    internalGetFieldAccessorTable() {
  return onnx.Onnx.internal_static_onnx_NodeProto_fieldAccessorTable
      .ensureFieldAccessorsInitialized(
          onnx.Onnx.NodeProto.class, onnx.Onnx.NodeProto.Builder.class);
}
// Construct using onnx.Onnx.NodeProto.newBuilder()
private Builder() {
}
private Builder(
    com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
  super(parent);
}
@java.lang.Override
public Builder clear() {
  super.clear();
  // Reset all builder presence bits and restore every field's default.
  bitField0_ = 0;
  input_ =
      com.google.protobuf.LazyStringArrayList.emptyList();
  output_ =
      com.google.protobuf.LazyStringArrayList.emptyList();
  name_ = "";
  opType_ = "";
  domain_ = "";
  if (attributeBuilder_ == null) {
    attribute_ = java.util.Collections.emptyList();
  } else {
    attribute_ = null;
    attributeBuilder_.clear();
  }
  // Clear the attribute "is mutable" bit (0x20) explicitly.
  bitField0_ = (bitField0_ & ~0x00000020);
  docString_ = "";
  return this;
}
@java.lang.Override
public com.google.protobuf.Descriptors.Descriptor
    getDescriptorForType() {
  return onnx.Onnx.internal_static_onnx_NodeProto_descriptor;
}
@java.lang.Override
public onnx.Onnx.NodeProto getDefaultInstanceForType() {
  return onnx.Onnx.NodeProto.getDefaultInstance();
}
@java.lang.Override
public onnx.Onnx.NodeProto build() {
  onnx.Onnx.NodeProto result = buildPartial();
  if (!result.isInitialized()) {
    throw newUninitializedMessageException(result);
  }
  return result;
}
@java.lang.Override
public onnx.Onnx.NodeProto buildPartial() {
  onnx.Onnx.NodeProto result = new onnx.Onnx.NodeProto(this);
  buildPartialRepeatedFields(result);
  if (bitField0_ != 0) { buildPartial0(result); }
  onBuilt();
  return result;
}
// Transfers the repeated 'attribute' field; freezes the builder-owned list
// so the built message is immutable.
private void buildPartialRepeatedFields(onnx.Onnx.NodeProto result) {
  if (attributeBuilder_ == null) {
    if (((bitField0_ & 0x00000020) != 0)) {
      attribute_ = java.util.Collections.unmodifiableList(attribute_);
      bitField0_ = (bitField0_ & ~0x00000020);
    }
    result.attribute_ = attribute_;
  } else {
    result.attribute_ = attributeBuilder_.build();
  }
}
// Maps builder-local presence bits (input 0x1, output 0x2, name 0x4,
// op_type 0x8, domain 0x10, doc_string 0x40) onto the message's presence
// bits (name 0x1, op_type 0x2, domain 0x4, doc_string 0x8).
private void buildPartial0(onnx.Onnx.NodeProto result) {
  int from_bitField0_ = bitField0_;
  if (((from_bitField0_ & 0x00000001) != 0)) {
    input_.makeImmutable();
    result.input_ = input_;
  }
  if (((from_bitField0_ & 0x00000002) != 0)) {
    output_.makeImmutable();
    result.output_ = output_;
  }
  int to_bitField0_ = 0;
  if (((from_bitField0_ & 0x00000004) != 0)) {
    result.name_ = name_;
    to_bitField0_ |= 0x00000001;
  }
  if (((from_bitField0_ & 0x00000008) != 0)) {
    result.opType_ = opType_;
    to_bitField0_ |= 0x00000002;
  }
  if (((from_bitField0_ & 0x00000010) != 0)) {
    result.domain_ = domain_;
    to_bitField0_ |= 0x00000004;
  }
  if (((from_bitField0_ & 0x00000040) != 0)) {
    result.docString_ = docString_;
    to_bitField0_ |= 0x00000008;
  }
  result.bitField0_ |= to_bitField0_;
}
// Reflective field-manipulation overrides: all delegate to the superclass;
// they are declared only to narrow the return type to this Builder.
@java.lang.Override
public Builder clone() {
  return super.clone();
}
@java.lang.Override
public Builder setField(
    com.google.protobuf.Descriptors.FieldDescriptor field,
    java.lang.Object value) {
  return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
    com.google.protobuf.Descriptors.FieldDescriptor field) {
  return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
    com.google.protobuf.Descriptors.OneofDescriptor oneof) {
  return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
    com.google.protobuf.Descriptors.FieldDescriptor field,
    int index, java.lang.Object value) {
  return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
    com.google.protobuf.Descriptors.FieldDescriptor field,
    java.lang.Object value) {
  return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(com.google.protobuf.Message other) {
  // Fast path for same-type merge; generic path otherwise.
  if (other instanceof onnx.Onnx.NodeProto) {
    return mergeFrom((onnx.Onnx.NodeProto)other);
  } else {
    super.mergeFrom(other);
    return this;
  }
}
public Builder mergeFrom(onnx.Onnx.NodeProto other) {
  if (other == onnx.Onnx.NodeProto.getDefaultInstance()) return this;
  // Repeated fields: when this builder's list is still empty, share the
  // other message's (immutable) list; otherwise copy elements in.
  if (!other.input_.isEmpty()) {
    if (input_.isEmpty()) {
      input_ = other.input_;
      bitField0_ |= 0x00000001;
    } else {
      ensureInputIsMutable();
      input_.addAll(other.input_);
    }
    onChanged();
  }
  if (!other.output_.isEmpty()) {
    if (output_.isEmpty()) {
      output_ = other.output_;
      bitField0_ |= 0x00000002;
    } else {
      ensureOutputIsMutable();
      output_.addAll(other.output_);
    }
    onChanged();
  }
  // Optional scalars: the other message's value wins when present.
  if (other.hasName()) {
    name_ = other.name_;
    bitField0_ |= 0x00000004;
    onChanged();
  }
  if (other.hasOpType()) {
    opType_ = other.opType_;
    bitField0_ |= 0x00000008;
    onChanged();
  }
  if (other.hasDomain()) {
    domain_ = other.domain_;
    bitField0_ |= 0x00000010;
    onChanged();
  }
  if (attributeBuilder_ == null) {
    if (!other.attribute_.isEmpty()) {
      if (attribute_.isEmpty()) {
        attribute_ = other.attribute_;
        bitField0_ = (bitField0_ & ~0x00000020);
      } else {
        ensureAttributeIsMutable();
        attribute_.addAll(other.attribute_);
      }
      onChanged();
    }
  } else {
    if (!other.attribute_.isEmpty()) {
      if (attributeBuilder_.isEmpty()) {
        // Repeated-field builder is empty: drop it, adopt the other list,
        // and lazily recreate the builder only if the runtime demands it.
        attributeBuilder_.dispose();
        attributeBuilder_ = null;
        attribute_ = other.attribute_;
        bitField0_ = (bitField0_ & ~0x00000020);
        attributeBuilder_ =
            com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
                getAttributeFieldBuilder() : null;
      } else {
        attributeBuilder_.addAllMessages(other.attribute_);
      }
    }
  }
  if (other.hasDocString()) {
    docString_ = other.docString_;
    bitField0_ |= 0x00000040;
    onChanged();
  }
  this.mergeUnknownFields(other.getUnknownFields());
  onChanged();
  return this;
}
@java.lang.Override
public final boolean isInitialized() {
  // No required fields, so a builder is always buildable.
  return true;
}
@java.lang.Override
public Builder mergeFrom(
    com.google.protobuf.CodedInputStream input,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws java.io.IOException {
  if (extensionRegistry == null) {
    throw new java.lang.NullPointerException();
  }
  try {
    boolean done = false;
    while (!done) {
      // tag = (field_number << 3) | wire_type; 0 means end of stream.
      int tag = input.readTag();
      switch (tag) {
        case 0:
          done = true;
          break;
        case 10: {  // field 1 (input), length-delimited
          com.google.protobuf.ByteString bs = input.readBytes();
          ensureInputIsMutable();
          input_.add(bs);
          break;
        } // case 10
        case 18: {  // field 2 (output), length-delimited
          com.google.protobuf.ByteString bs = input.readBytes();
          ensureOutputIsMutable();
          output_.add(bs);
          break;
        } // case 18
        case 26: {  // field 3 (name)
          name_ = input.readBytes();
          bitField0_ |= 0x00000004;
          break;
        } // case 26
        case 34: {  // field 4 (op_type)
          opType_ = input.readBytes();
          bitField0_ |= 0x00000008;
          break;
        } // case 34
        case 42: {  // field 5 (attribute), embedded message
          onnx.Onnx.AttributeProto m =
              input.readMessage(
                  onnx.Onnx.AttributeProto.PARSER,
                  extensionRegistry);
          if (attributeBuilder_ == null) {
            ensureAttributeIsMutable();
            attribute_.add(m);
          } else {
            attributeBuilder_.addMessage(m);
          }
          break;
        } // case 42
        case 50: {  // field 6 (doc_string)
          docString_ = input.readBytes();
          bitField0_ |= 0x00000040;
          break;
        } // case 50
        case 58: {  // field 7 (domain)
          domain_ = input.readBytes();
          bitField0_ |= 0x00000010;
          break;
        } // case 58
        default: {
          // Unrecognized tags are preserved as unknown fields.
          if (!super.parseUnknownField(input, extensionRegistry, tag)) {
            done = true; // was an endgroup tag
          }
          break;
        } // default:
      } // switch (tag)
    } // while (!done)
  } catch (com.google.protobuf.InvalidProtocolBufferException e) {
    throw e.unwrapIOException();
  } finally {
    // Always notify parents, even when parsing stopped with an exception.
    onChanged();
  } // finally
  return this;
}
// Builder-local presence/mutability bits; the layout differs from the
// message's bitField0_ (see buildPartial0 for the mapping).
private int bitField0_;
private com.google.protobuf.LazyStringArrayList input_ =
    com.google.protobuf.LazyStringArrayList.emptyList();
// Copy-on-write: replace a shared/immutable backing list before mutation.
private void ensureInputIsMutable() {
  if (!input_.isModifiable()) {
    input_ = new com.google.protobuf.LazyStringArrayList(input_);
  }
  bitField0_ |= 0x00000001;
}
/**
 * namespace Value
 *
 * repeated string input = 1;
 * @return A list containing the input.
 */
public com.google.protobuf.ProtocolStringList
    getInputList() {
  // Freeze before exposing so callers cannot mutate builder state.
  input_.makeImmutable();
  return input_;
}
/**
 * namespace Value
 *
 * repeated string input = 1;
 * @return The count of input.
 */
public int getInputCount() {
  return input_.size();
}
/**
 * namespace Value
 *
 * repeated string input = 1;
 * @param index The index of the element to return.
 * @return The input at the given index.
 */
public java.lang.String getInput(int index) {
  return input_.get(index);
}
/**
 * namespace Value
 *
 * repeated string input = 1;
 * @param index The index of the value to return.
 * @return The bytes of the input at the given index.
 */
public com.google.protobuf.ByteString
    getInputBytes(int index) {
  return input_.getByteString(index);
}
/**
 * namespace Value
 *
 * repeated string input = 1;
 * @param index The index to set the value at.
 * @param value The input to set.
 * @return This builder for chaining.
 */
public Builder setInput(
    int index, java.lang.String value) {
  if (value == null) { throw new NullPointerException(); }
  ensureInputIsMutable();
  input_.set(index, value);
  bitField0_ |= 0x00000001;
  onChanged();
  return this;
}
/**
 * namespace Value
 *
 * repeated string input = 1;
 * @param value The input to add.
 * @return This builder for chaining.
 */
public Builder addInput(
    java.lang.String value) {
  if (value == null) { throw new NullPointerException(); }
  ensureInputIsMutable();
  input_.add(value);
  bitField0_ |= 0x00000001;
  onChanged();
  return this;
}
/**
 * namespace Value
 *
 * repeated string input = 1;
 * Restored the element type argument lost in this copy (raw {@code Iterable}).
 * @param values The input to add.
 * @return This builder for chaining.
 */
public Builder addAllInput(
    java.lang.Iterable<java.lang.String> values) {
  ensureInputIsMutable();
  // Helper rejects null elements and handles LazyStringList sources efficiently.
  com.google.protobuf.AbstractMessageLite.Builder.addAll(
      values, input_);
  bitField0_ |= 0x00000001;
  onChanged();
  return this;
}
/**
 * repeated string input = 1;
 * @param value The bytes of the input to add; stored without UTF-8 validation.
 * @return This builder for chaining.
 */
public Builder addInputBytes(
    com.google.protobuf.ByteString value) {
  if (value == null) { throw new NullPointerException(); }
  ensureInputIsMutable();
  input_.add(value);
  bitField0_ |= 0x00000001;
  onChanged();
  return this;
}
private com.google.protobuf.LazyStringArrayList output_ =
    com.google.protobuf.LazyStringArrayList.emptyList();
// Copy-on-write: replace a shared/immutable backing list before mutation.
private void ensureOutputIsMutable() {
  if (!output_.isModifiable()) {
    output_ = new com.google.protobuf.LazyStringArrayList(output_);
  }
  bitField0_ |= 0x00000002;
}
/**
 * namespace Value
 *
 * repeated string output = 2;
 * @return A list containing the output.
 */
public com.google.protobuf.ProtocolStringList
    getOutputList() {
  // Freeze before exposing so callers cannot mutate builder state.
  output_.makeImmutable();
  return output_;
}
/**
 * namespace Value
 *
 * repeated string output = 2;
 * @return The count of output.
 */
public int getOutputCount() {
  return output_.size();
}
/**
 * namespace Value
 *
 * repeated string output = 2;
 * @param index The index of the element to return.
 * @return The output at the given index.
 */
public java.lang.String getOutput(int index) {
  return output_.get(index);
}
/**
 * namespace Value
 *
 * repeated string output = 2;
 * @param index The index of the value to return.
 * @return The bytes of the output at the given index.
 */
public com.google.protobuf.ByteString
    getOutputBytes(int index) {
  return output_.getByteString(index);
}
/**
 * namespace Value
 *
 * repeated string output = 2;
 * @param index The index to set the value at.
 * @param value The output to set.
 * @return This builder for chaining.
 */
public Builder setOutput(
    int index, java.lang.String value) {
  if (value == null) { throw new NullPointerException(); }
  ensureOutputIsMutable();
  output_.set(index, value);
  bitField0_ |= 0x00000002;
  onChanged();
  return this;
}
/**
 * namespace Value
 *
 * repeated string output = 2;
 * @param value The output to add.
 * @return This builder for chaining.
 */
public Builder addOutput(
    java.lang.String value) {
  if (value == null) { throw new NullPointerException(); }
  ensureOutputIsMutable();
  output_.add(value);
  bitField0_ |= 0x00000002;
  onChanged();
  return this;
}
/**
 * namespace Value
 *
 * repeated string output = 2;
 * Restored the element type argument lost in this copy (raw {@code Iterable}).
 * @param values The output to add.
 * @return This builder for chaining.
 */
public Builder addAllOutput(
    java.lang.Iterable<java.lang.String> values) {
  ensureOutputIsMutable();
  // Helper rejects null elements and handles LazyStringList sources efficiently.
  com.google.protobuf.AbstractMessageLite.Builder.addAll(
      values, output_);
  bitField0_ |= 0x00000002;
  onChanged();
  return this;
}
/**
 * repeated string output = 2;
 * @param value The bytes of the output to add; stored without UTF-8 validation.
 * @return This builder for chaining.
 */
public Builder addOutputBytes(
    com.google.protobuf.ByteString value) {
  if (value == null) { throw new NullPointerException(); }
  ensureOutputIsMutable();
  output_.add(value);
  bitField0_ |= 0x00000002;
  onChanged();
  return this;
}
private java.lang.Object name_ = "";
/**
*
* An optional identifier for this node in a graph.
* This field MAY be absent in ths version of the IR.
*
*
* optional string name = 3;
* @return Whether the name field is set.
*/
public boolean hasName() {
return ((bitField0_ & 0x00000004) != 0);
}
/**
*
* An optional identifier for this node in a graph.
* This field MAY be absent in ths version of the IR.
*
*
* optional string name = 3;
* @return The name.
*/
public java.lang.String getName() {
java.lang.Object ref = name_;
if (!(ref instanceof java.lang.String)) {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
name_ = s;
}
return s;
} else {
return (java.lang.String) ref;
}
}
/**
 * Returns the UTF-8 bytes of {@code optional string name = 3;}.
 * If the field is currently cached as a String it is encoded once, and the
 * resulting ByteString replaces the cached value for later calls.
 *
 * @return The bytes for name.
 */
public com.google.protobuf.ByteString
    getNameBytes() {
  java.lang.Object cached = name_;
  if (!(cached instanceof java.lang.String)) {
    return (com.google.protobuf.ByteString) cached;
  }
  com.google.protobuf.ByteString encoded =
      com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) cached);
  name_ = encoded;
  return encoded;
}
/**
 * Sets {@code optional string name = 3;}, an optional identifier for this
 * node in a graph.
 *
 * @param value The name to set; must be non-null.
 * @return This builder for chaining.
 */
public Builder setName(
    java.lang.String value) {
  java.util.Objects.requireNonNull(value);
  name_ = value;
  bitField0_ |= 0x00000004;
  onChanged();
  return this;
}
/**
 * Clears {@code optional string name = 3;}, restoring the default value and
 * marking the field unset.
 *
 * @return This builder for chaining.
 */
public Builder clearName() {
name_ = getDefaultInstance().getName();
bitField0_ = (bitField0_ & ~0x00000004);
onChanged();
return this;
}
/**
 * Sets {@code optional string name = 3;} from raw bytes. The bytes are not
 * validated here; decoding happens lazily in {@code getName()}.
 *
 * @param value The bytes for name to set; must be non-null.
 * @return This builder for chaining.
 */
public Builder setNameBytes(
    com.google.protobuf.ByteString value) {
  java.util.Objects.requireNonNull(value);
  name_ = value;
  bitField0_ |= 0x00000004;
  onChanged();
  return this;
}
// Holds either a String or a ByteString, like name_ above.
private java.lang.Object opType_ = "";
/**
 * The symbolic identifier of the Operator to execute.
 *
 * optional string op_type = 4;
 * @return Whether the opType field is set (bit 0x08 of bitField0_).
 */
public boolean hasOpType() {
return ((bitField0_ & 0x00000008) != 0);
}
/**
 * Sets {@code optional string doc_string = 6;} from raw bytes.
 * A human-readable documentation for this node. Markdown is allowed.
 *
 * (The original Javadoc here had op_type text fused into it — apparently
 * several intervening accessors were lost from this excerpt; rewritten to
 * describe only doc_string, which is what this method sets.)
 *
 * @param value The bytes for docString to set.
 * @return This builder for chaining.
 */
public Builder setDocStringBytes(
com.google.protobuf.ByteString value) {
if (value == null) { throw new NullPointerException(); }
docString_ = value;
bitField0_ |= 0x00000040;
onChanged();
return this;
}
// Standard generated overrides: unknown-field handling is delegated entirely
// to the GeneratedMessageV3.Builder superclass; narrowed return type only.
@java.lang.Override
public final Builder setUnknownFields(
final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:onnx.NodeProto)
}
// @@protoc_insertion_point(class_scope:onnx.NodeProto)
// Shared singleton representing a NodeProto with all fields unset; protobuf
// messages are immutable, so one instance serves every caller.
private static final onnx.Onnx.NodeProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new onnx.Onnx.NodeProto();
}
/** @return the shared immutable default NodeProto instance. */
public static onnx.Onnx.NodeProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
/**
 * Parser for NodeProto. Deprecated in generated code in favor of
 * {@link #parser()}.
 *
 * Fix: both the field type and the anonymous subclass were raw
 * ({@code Parser} / {@code AbstractParser}) because the {@code <NodeProto>}
 * type arguments were stripped; restored the generated generics.
 */
@java.lang.Deprecated public static final com.google.protobuf.Parser<NodeProto>
    PARSER = new com.google.protobuf.AbstractParser<NodeProto>() {
  @java.lang.Override
  public NodeProto parsePartialFrom(
      com.google.protobuf.CodedInputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    Builder builder = newBuilder();
    try {
      builder.mergeFrom(input, extensionRegistry);
    } catch (com.google.protobuf.InvalidProtocolBufferException e) {
      // Attach whatever was parsed so far so callers can inspect partial data.
      throw e.setUnfinishedMessage(builder.buildPartial());
    } catch (com.google.protobuf.UninitializedMessageException e) {
      throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
    } catch (java.io.IOException e) {
      // Wrap plain I/O failures in the protobuf exception type callers expect.
      throw new com.google.protobuf.InvalidProtocolBufferException(e)
          .setUnfinishedMessage(builder.buildPartial());
    }
    return builder.buildPartial();
  }
};
/**
 * @return the parser for NodeProto messages.
 * Fix: return type was a raw {@code Parser}; restored {@code Parser<NodeProto>}.
 */
public static com.google.protobuf.Parser<NodeProto> parser() {
  return PARSER;
}
/**
 * @return the parser for this message type (covariant override).
 * Fix: return type was a raw {@code Parser}; restored {@code Parser<NodeProto>}.
 */
@java.lang.Override
public com.google.protobuf.Parser<NodeProto> getParserForType() {
  return PARSER;
}
/** @return the default NodeProto instance (same object as getDefaultInstance()). */
@java.lang.Override
public onnx.Onnx.NodeProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
public interface TrainingInfoProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:onnx.TrainingInfoProto)
com.google.protobuf.MessageOrBuilder {
/**
*
* This field describes a graph to compute the initial tensors
* upon starting the training process. Initialization graph has no input
* and can have multiple outputs. Usually, trainable tensors in neural
* networks are randomly initialized. To achieve that, for each tensor,
* the user can put a random number operator such as RandomNormal or
* RandomUniform in TrainingInfoProto.initialization.node and assign its
* random output to the specific tensor using "initialization_binding".
* This graph can also set the initializers in "algorithm" in the same
* TrainingInfoProto; a use case is resetting the number of training
* iteration to zero.
*
* By default, this field is an empty graph and its evaluation does not
* produce any output. Thus, no initializer would be changed by default.
*
*
* optional .onnx.GraphProto initialization = 1;
* @return Whether the initialization field is set.
*/
boolean hasInitialization();
/**
*
* This field describes a graph to compute the initial tensors
* upon starting the training process. Initialization graph has no input
* and can have multiple outputs. Usually, trainable tensors in neural
* networks are randomly initialized. To achieve that, for each tensor,
* the user can put a random number operator such as RandomNormal or
* RandomUniform in TrainingInfoProto.initialization.node and assign its
* random output to the specific tensor using "initialization_binding".
* This graph can also set the initializers in "algorithm" in the same
* TrainingInfoProto; a use case is resetting the number of training
* iteration to zero.
*
* By default, this field is an empty graph and its evaluation does not
* produce any output. Thus, no initializer would be changed by default.
*
* This field describes a graph to compute the initial tensors
* upon starting the training process. Initialization graph has no input
* and can have multiple outputs. Usually, trainable tensors in neural
* networks are randomly initialized. To achieve that, for each tensor,
* the user can put a random number operator such as RandomNormal or
* RandomUniform in TrainingInfoProto.initialization.node and assign its
* random output to the specific tensor using "initialization_binding".
* This graph can also set the initializers in "algorithm" in the same
* TrainingInfoProto; a use case is resetting the number of training
* iteration to zero.
*
* By default, this field is an empty graph and its evaluation does not
* produce any output. Thus, no initializer would be changed by default.
*
* This field represents a training algorithm step. Given required inputs,
* it computes outputs to update initializers in its own or inference graph's
* initializer lists. In general, this field contains loss node, gradient node,
* optimizer node, increment of iteration count.
*
* An execution of the training algorithm step is performed by executing the
* graph obtained by combining the inference graph (namely "ModelProto.graph")
* and the "algorithm" graph. That is, the actual
* input/initializer/output/node/value_info/sparse_initializer list of
* the training graph is the concatenation of
* "ModelProto.graph.input/initializer/output/node/value_info/sparse_initializer"
* and "algorithm.input/initializer/output/node/value_info/sparse_initializer"
* in that order. This combined graph must satisfy the normal ONNX conditions.
* Now, let's provide a visualization of graph combination for clarity.
* Let the inference graph (i.e., "ModelProto.graph") be
* tensor_a, tensor_b -> MatMul -> tensor_c -> Sigmoid -> tensor_d
* and the "algorithm" graph be
* tensor_d -> Add -> tensor_e
* The combination process results
* tensor_a, tensor_b -> MatMul -> tensor_c -> Sigmoid -> tensor_d -> Add -> tensor_e
*
* Notice that an input of a node in the "algorithm" graph may reference the
* output of a node in the inference graph (but not the other way round). Also, inference
* node cannot reference inputs of "algorithm". With these restrictions, inference graph
* can always be run independently without training information.
*
* By default, this field is an empty graph and its evaluation does not
* produce any output. Evaluating the default training step never
* update any initializers.
*
*
* optional .onnx.GraphProto algorithm = 2;
* @return Whether the algorithm field is set.
*/
boolean hasAlgorithm();
/**
 * (Condensed: the original comment block here repeated each section below
 * two to five times; one copy of each is kept.)
 *
 * "algorithm" — optional .onnx.GraphProto algorithm = 2:
 * This field represents a training algorithm step. Given required inputs,
 * it computes outputs to update initializers in its own or inference graph's
 * initializer lists. In general, this field contains loss node, gradient node,
 * optimizer node, increment of iteration count.
 *
 * An execution of the training algorithm step is performed by executing the
 * graph obtained by combining the inference graph (namely "ModelProto.graph")
 * and the "algorithm" graph. That is, the actual
 * input/initializer/output/node/value_info/sparse_initializer list of
 * the training graph is the concatenation of
 * "ModelProto.graph.input/initializer/output/node/value_info/sparse_initializer"
 * and "algorithm.input/initializer/output/node/value_info/sparse_initializer"
 * in that order. This combined graph must satisfy the normal ONNX conditions.
 * For example, if the inference graph (i.e., "ModelProto.graph") is
 * tensor_a, tensor_b -> MatMul -> tensor_c -> Sigmoid -> tensor_d
 * and the "algorithm" graph is
 * tensor_d -> Add -> tensor_e
 * the combination yields
 * tensor_a, tensor_b -> MatMul -> tensor_c -> Sigmoid -> tensor_d -> Add -> tensor_e
 *
 * Notice that an input of a node in the "algorithm" graph may reference the
 * output of a node in the inference graph (but not the other way round), and
 * an inference node cannot reference inputs of "algorithm". With these
 * restrictions, the inference graph can always be run independently without
 * training information.
 *
 * By default, this field is an empty graph and its evaluation does not
 * produce any output. Evaluating the default training step never updates
 * any initializers.
 *
 * "initialization_binding" — repeated .onnx.StringStringEntryProto initialization_binding = 3:
 * This field specifies the bindings from the outputs of "initialization" to
 * some initializers in "ModelProto.graph.initializer" and
 * the "algorithm.initializer" in the same TrainingInfoProto.
 * See "update_binding" below for details.
 *
 * By default, this field is empty and no initializer would be changed
 * by the execution of "initialization".
 *
 * "update_binding" — repeated .onnx.StringStringEntryProto update_binding = 4:
 * Gradient-based training is usually an iterative procedure. In one gradient
 * descent iteration, we apply
 *
 * x = x - r * g
 *
 * where "x" is the optimized tensor, "r" stands for learning rate, and "g" is
 * the gradient of "x" with respect to a chosen loss. To avoid adding
 * assignments into the training graph, we split the update equation into
 *
 * y = x - r * g
 * x = y
 *
 * The user needs to save "y = x - r * g" into TrainingInfoProto.algorithm. To
 * tell that "y" should be assigned to "x", the field "update_binding" may
 * contain a key-value pair of strings, "x" (key of StringStringEntryProto)
 * and "y" (value of StringStringEntryProto).
 * For a neural network with multiple trainable (mutable) tensors, there can
 * be multiple key-value pairs in "update_binding".
 *
 * The initializers that appear as keys in "update_binding" are considered
 * mutable variables. This implies the behaviors described below.
 *
 * 1. We have only unique keys across all "update_binding"s, so that two
 * variables may not have the same name. This ensures that one
 * variable is assigned at most once.
 * 2. The keys must appear in the names of "ModelProto.graph.initializer" or
 * "TrainingInfoProto.algorithm.initializer".
 * 3. The values must be output names of "algorithm" or "ModelProto.graph.output".
 * 4. Mutable variables are initialized to the value specified by the
 * corresponding initializer, and then potentially updated by
 * "initialization_binding"s and "update_binding"s in "TrainingInfoProto"s.
 *
 * This field usually contains names of trainable tensors
 * (in ModelProto.graph), optimizer states such as momentums in advanced
 * stochastic gradient methods (in TrainingInfoProto.graph),
 * and the number of training iterations (in TrainingInfoProto.graph).
 *
 * By default, this field is empty and no initializer would be changed
 * by the execution of "algorithm".
 *
 * Training information
 * TrainingInfoProto stores information for training a model.
 * In particular, this defines two functionalities: an initialization-step
 * and a training-algorithm-step. Initialization resets the model
 * back to its original state as if no training has been performed.
 * The training algorithm improves the model based on input data.
 *
 * The semantics of the initialization-step is that the initializers
 * in ModelProto.graph and in TrainingInfoProto.algorithm are first
 * initialized as specified by the initializers in the graph, and then
 * updated by the "initialization_binding" in every instance in
 * ModelProto.training_info.
 *
 * The field "algorithm" defines a computation graph which represents a
 * training algorithm's step. After the execution of a
 * TrainingInfoProto.algorithm, the initializers specified by "update_binding"
 * may be immediately updated. If the targeted training algorithm contains
 * consecutive update steps (such as block coordinate descent methods),
 * the user needs to create a TrainingInfoProto for each step.
 *
 * Protobuf type {@code onnx.TrainingInfoProto}
 */
public static final class TrainingInfoProto extends
com.google.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:onnx.TrainingInfoProto)
TrainingInfoProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use TrainingInfoProto.newBuilder() to construct.
// Builder-based constructor used by Builder.build()/buildPartial().
// NOTE(fix): the parameter type read "GeneratedMessageV3.Builder>" — a syntax
// error left behind when the "<?" of the wildcard was stripped; restored
// Builder<?> as emitted by protoc.
private TrainingInfoProto(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
  super(builder);
}
// No-arg constructor for the default instance: repeated fields start as
// shared immutable empty lists.
private TrainingInfoProto() {
initializationBinding_ = java.util.Collections.emptyList();
updateBinding_ = java.util.Collections.emptyList();
}
// Required by the GeneratedMessageV3 reflection machinery to create fresh
// instances; the UnusedPrivateParameter only disambiguates the overload.
@java.lang.Override
@SuppressWarnings({"unused"})
protected java.lang.Object newInstance(
UnusedPrivateParameter unused) {
return new TrainingInfoProto();
}
// Descriptor for onnx.TrainingInfoProto, generated from onnx.proto.
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return onnx.Onnx.internal_static_onnx_TrainingInfoProto_descriptor;
}
// Maps descriptor fields to this class's accessors for reflective access.
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return onnx.Onnx.internal_static_onnx_TrainingInfoProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
onnx.Onnx.TrainingInfoProto.class, onnx.Onnx.TrainingInfoProto.Builder.class);
}
// Presence bits for the optional singular fields of this message.
private int bitField0_;
public static final int INITIALIZATION_FIELD_NUMBER = 1;
// null until set; presence is tracked by bit 0x01 of bitField0_.
private onnx.Onnx.GraphProto initialization_;
/**
 * This field describes a graph to compute the initial tensors
 * upon starting the training process. Initialization graph has no input
 * and can have multiple outputs. Usually, trainable tensors in neural
 * networks are randomly initialized. To achieve that, for each tensor,
 * the user can put a random number operator such as RandomNormal or
 * RandomUniform in TrainingInfoProto.initialization.node and assign its
 * random output to the specific tensor using "initialization_binding".
 * This graph can also set the initializers in "algorithm" in the same
 * TrainingInfoProto; a use case is resetting the number of training
 * iterations to zero.
 *
 * By default, this field is an empty graph and its evaluation does not
 * produce any output. Thus, no initializer would be changed by default.
 *
 * optional .onnx.GraphProto initialization = 1;
 * @return Whether the initialization field is set.
 */
@java.lang.Override
public boolean hasInitialization() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
 * OrBuilder accessor for {@code optional .onnx.GraphProto initialization = 1;}.
 * See {@link #hasInitialization()} for the field's semantics.
 * (The original generated Javadoc here was duplicated; condensed to one copy.)
 *
 * @return the initialization graph, or GraphProto's default instance when unset
 */
@java.lang.Override
public onnx.Onnx.GraphProtoOrBuilder getInitializationOrBuilder() {
return initialization_ == null ? onnx.Onnx.GraphProto.getDefaultInstance() : initialization_;
}
public static final int ALGORITHM_FIELD_NUMBER = 2;
// null until set; presence is tracked by bit 0x02 of bitField0_.
private onnx.Onnx.GraphProto algorithm_;
/**
 * This field represents a training algorithm step. Given required inputs,
 * it computes outputs to update initializers in its own or inference graph's
 * initializer lists. In general, this field contains loss node, gradient node,
 * optimizer node, increment of iteration count.
 *
 * An execution of the training algorithm step is performed by executing the
 * graph obtained by combining the inference graph (namely "ModelProto.graph")
 * and the "algorithm" graph. That is, the actual
 * input/initializer/output/node/value_info/sparse_initializer list of
 * the training graph is the concatenation of
 * "ModelProto.graph.input/initializer/output/node/value_info/sparse_initializer"
 * and "algorithm.input/initializer/output/node/value_info/sparse_initializer"
 * in that order. This combined graph must satisfy the normal ONNX conditions.
 * For example, if the inference graph (i.e., "ModelProto.graph") is
 * tensor_a, tensor_b -> MatMul -> tensor_c -> Sigmoid -> tensor_d
 * and the "algorithm" graph is
 * tensor_d -> Add -> tensor_e
 * the combination yields
 * tensor_a, tensor_b -> MatMul -> tensor_c -> Sigmoid -> tensor_d -> Add -> tensor_e
 *
 * Notice that an input of a node in the "algorithm" graph may reference the
 * output of a node in the inference graph (but not the other way round). Also, an inference
 * node cannot reference inputs of "algorithm". With these restrictions, the inference graph
 * can always be run independently without training information.
 *
 * By default, this field is an empty graph and its evaluation does not
 * produce any output. Evaluating the default training step never
 * updates any initializers.
 *
 * optional .onnx.GraphProto algorithm = 2;
 * @return Whether the algorithm field is set.
 */
@java.lang.Override
public boolean hasAlgorithm() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
*
* This field represents a training algorithm step. Given required inputs,
* it computes outputs to update initializers in its own or inference graph's
* initializer lists. In general, this field contains loss node, gradient node,
* optimizer node, increment of iteration count.
*
* An execution of the training algorithm step is performed by executing the
* graph obtained by combining the inference graph (namely "ModelProto.graph")
* and the "algorithm" graph. That is, the actual
* input/initializer/output/node/value_info/sparse_initializer list of
* the training graph is the concatenation of
* "ModelProto.graph.input/initializer/output/node/value_info/sparse_initializer"
* and "algorithm.input/initializer/output/node/value_info/sparse_initializer"
* in that order. This combined graph must satisfy the normal ONNX conditions.
* Now, let's provide a visualization of graph combination for clarity.
* Let the inference graph (i.e., "ModelProto.graph") be
* tensor_a, tensor_b -> MatMul -> tensor_c -> Sigmoid -> tensor_d
* and the "algorithm" graph be
* tensor_d -> Add -> tensor_e
* The combination process results
* tensor_a, tensor_b -> MatMul -> tensor_c -> Sigmoid -> tensor_d -> Add -> tensor_e
*
* Notice that an input of a node in the "algorithm" graph may reference the
* output of a node in the inference graph (but not the other way round). Also, inference
* node cannot reference inputs of "algorithm". With these restrictions, inference graph
* can always be run independently without training information.
*
* By default, this field is an empty graph and its evaluation does not
* produce any output. Evaluating the default training step never
* update any initializers.
*
* This field represents a training algorithm step. Given required inputs,
* it computes outputs to update initializers in its own or inference graph's
* initializer lists. In general, this field contains loss node, gradient node,
* optimizer node, increment of iteration count.
*
* An execution of the training algorithm step is performed by executing the
* graph obtained by combining the inference graph (namely "ModelProto.graph")
* and the "algorithm" graph. That is, the actual
* input/initializer/output/node/value_info/sparse_initializer list of
* the training graph is the concatenation of
* "ModelProto.graph.input/initializer/output/node/value_info/sparse_initializer"
* and "algorithm.input/initializer/output/node/value_info/sparse_initializer"
* in that order. This combined graph must satisfy the normal ONNX conditions.
* Now, let's provide a visualization of graph combination for clarity.
* Let the inference graph (i.e., "ModelProto.graph") be
* tensor_a, tensor_b -> MatMul -> tensor_c -> Sigmoid -> tensor_d
* and the "algorithm" graph be
* tensor_d -> Add -> tensor_e
* The combination process results
* tensor_a, tensor_b -> MatMul -> tensor_c -> Sigmoid -> tensor_d -> Add -> tensor_e
*
* Notice that an input of a node in the "algorithm" graph may reference the
* output of a node in the inference graph (but not the other way round). Also, inference
* node cannot reference inputs of "algorithm". With these restrictions, inference graph
* can always be run independently without training information.
*
* By default, this field is an empty graph and its evaluation does not
* produce any output. Evaluating the default training step never
* update any initializers.
*
*
* optional .onnx.GraphProto algorithm = 2;
*/
@java.lang.Override
public onnx.Onnx.GraphProtoOrBuilder getAlgorithmOrBuilder() {
  // Return the stored graph when present; otherwise fall back to the
  // type's immutable default instance (never returns null).
  if (algorithm_ != null) {
    return algorithm_;
  }
  return onnx.Onnx.GraphProto.getDefaultInstance();
}
public static final int INITIALIZATION_BINDING_FIELD_NUMBER = 3;
@SuppressWarnings("serial")
// Restored element type: the raw List had lost its generic argument (likely an
// HTML-stripping artifact), which makes get(index) return Object and breaks
// callers such as getInitializationBindingOrBuilder at compile time.
private java.util.List<onnx.Onnx.StringStringEntryProto> initializationBinding_;
/**
 * This field specifies the bindings from the outputs of "initialization" to
 * some initializers in "ModelProto.graph.initializer" and
 * the "algorithm.initializer" in the same TrainingInfoProto.
 * See "update_binding" below for details.
 *
 * By default, this field is empty and no initializer would be changed
 * by the execution of "initialization".
 *
 * repeated .onnx.StringStringEntryProto initialization_binding = 3;
 *
 * @return the number of initialization_binding entries.
 */
@java.lang.Override
public int getInitializationBindingCount() {
return initializationBinding_.size();
}
/**
 * This field specifies the bindings from the outputs of "initialization" to
 * some initializers in "ModelProto.graph.initializer" and
 * the "algorithm.initializer" in the same TrainingInfoProto.
 * See "update_binding" below for details.
 *
 * By default, this field is empty and no initializer would be changed
 * by the execution of "initialization".
 *
 * repeated .onnx.StringStringEntryProto initialization_binding = 3;
 *
 * @param index zero-based index of the entry to view.
 * @return a read-only OrBuilder view of the initialization_binding entry at
 *     the given index.
 */
@java.lang.Override
public onnx.Onnx.StringStringEntryProtoOrBuilder getInitializationBindingOrBuilder(
int index) {
return initializationBinding_.get(index);
}
public static final int UPDATE_BINDING_FIELD_NUMBER = 4;
@SuppressWarnings("serial")
// Restored element type: the raw List had lost its generic argument (likely an
// HTML-stripping artifact), which makes get(index) return Object and breaks
// callers such as getUpdateBindingOrBuilder at compile time.
private java.util.List<onnx.Onnx.StringStringEntryProto> updateBinding_;
/**
 * Gradient-based training is usually an iterative procedure. In one gradient
 * descent iteration, we apply
 *
 * x = x - r * g
 *
 * where "x" is the optimized tensor, "r" stands for learning rate, and "g" is
 * gradient of "x" with respect to a chosen loss. To avoid adding assignments
 * into the training graph, we split the update equation into
 *
 * y = x - r * g
 * x = y
 *
 * The user needs to save "y = x - r * g" into TrainingInfoProto.algorithm. To
 * tell that "y" should be assigned to "x", the field "update_binding" may
 * contain a key-value pair of strings, "x" (key of StringStringEntryProto)
 * and "y" (value of StringStringEntryProto).
 * For a neural network with multiple trainable (mutable) tensors, there can
 * be multiple key-value pairs in "update_binding".
 *
 * The initializers appears as keys in "update_binding" are considered
 * mutable variables. This implies some behaviors
 * as described below.
 *
 * 1. We have only unique keys in all "update_binding"s so that two
 *    variables may not have the same name. This ensures that one
 *    variable is assigned up to once.
 * 2. The keys must appear in names of "ModelProto.graph.initializer" or
 *    "TrainingInfoProto.algorithm.initializer".
 * 3. The values must be output names of "algorithm" or "ModelProto.graph.output".
 * 4. Mutable variables are initialized to the value specified by the
 *    corresponding initializer, and then potentially updated by
 *    "initializer_binding"s and "update_binding"s in "TrainingInfoProto"s.
 *
 * This field usually contains names of trainable tensors
 * (in ModelProto.graph), optimizer states such as momentums in advanced
 * stochastic gradient methods (in TrainingInfoProto.graph),
 * and number of training iterations (in TrainingInfoProto.graph).
 *
 * By default, this field is empty and no initializer would be changed
 * by the execution of "algorithm".
 *
 * repeated .onnx.StringStringEntryProto update_binding = 4;
 *
 * @return the number of update_binding entries.
 */
@java.lang.Override
public int getUpdateBindingCount() {
return updateBinding_.size();
}
/**
 * Gradient-based training is usually an iterative procedure. In one gradient
 * descent iteration, we apply
 *
 * x = x - r * g
 *
 * where "x" is the optimized tensor, "r" stands for learning rate, and "g" is
 * gradient of "x" with respect to a chosen loss. To avoid adding assignments
 * into the training graph, we split the update equation into
 *
 * y = x - r * g
 * x = y
 *
 * The user needs to save "y = x - r * g" into TrainingInfoProto.algorithm. To
 * tell that "y" should be assigned to "x", the field "update_binding" may
 * contain a key-value pair of strings, "x" (key of StringStringEntryProto)
 * and "y" (value of StringStringEntryProto).
 * For a neural network with multiple trainable (mutable) tensors, there can
 * be multiple key-value pairs in "update_binding".
 *
 * The initializers appears as keys in "update_binding" are considered
 * mutable variables. This implies some behaviors
 * as described below.
 *
 * 1. We have only unique keys in all "update_binding"s so that two
 *    variables may not have the same name. This ensures that one
 *    variable is assigned up to once.
 * 2. The keys must appear in names of "ModelProto.graph.initializer" or
 *    "TrainingInfoProto.algorithm.initializer".
 * 3. The values must be output names of "algorithm" or "ModelProto.graph.output".
 * 4. Mutable variables are initialized to the value specified by the
 *    corresponding initializer, and then potentially updated by
 *    "initializer_binding"s and "update_binding"s in "TrainingInfoProto"s.
 *
 * This field usually contains names of trainable tensors
 * (in ModelProto.graph), optimizer states such as momentums in advanced
 * stochastic gradient methods (in TrainingInfoProto.graph),
 * and number of training iterations (in TrainingInfoProto.graph).
 *
 * By default, this field is empty and no initializer would be changed
 * by the execution of "algorithm".
 *
 * repeated .onnx.StringStringEntryProto update_binding = 4;
 *
 * @param index zero-based index of the entry to view.
 * @return a read-only OrBuilder view of the update_binding entry at the
 *     given index.
 */
@java.lang.Override
public onnx.Onnx.StringStringEntryProtoOrBuilder getUpdateBindingOrBuilder(
int index) {
return updateBinding_.get(index);
}
// Cached result of isInitialized(): -1 = not yet computed, 0 = false, 1 = true.
private byte memoizedIsInitialized = -1;
/**
 * Reports whether all required fields are set. TrainingInfoProto declares no
 * required fields, so the result is always true; it is memoized on first call.
 */
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
memoizedIsInitialized = 1;
return true;
}
/**
 * Serializes this message to the given stream in protobuf wire format.
 * Fields are written in ascending field-number order (1..4), followed by
 * any unknown fields preserved from parsing.
 *
 * @throws java.io.IOException if the underlying stream fails.
 */
@java.lang.Override
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
// Bit 0 of bitField0_ marks presence of the optional "initialization" field.
if (((bitField0_ & 0x00000001) != 0)) {
output.writeMessage(1, getInitialization());
}
// Bit 1 marks presence of the optional "algorithm" field.
if (((bitField0_ & 0x00000002) != 0)) {
output.writeMessage(2, getAlgorithm());
}
// Repeated fields: one length-delimited message per element, in list order.
for (int i = 0; i < initializationBinding_.size(); i++) {
output.writeMessage(3, initializationBinding_.get(i));
}
for (int i = 0; i < updateBinding_.size(); i++) {
output.writeMessage(4, updateBinding_.get(i));
}
getUnknownFields().writeTo(output);
}
/**
 * Computes the serialized size of this message in bytes, memoizing the
 * result (memoizedSize == -1 means "not yet computed"). Mirrors writeTo():
 * every field counted here is written there, and vice versa.
 */
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) != 0)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, getInitialization());
}
if (((bitField0_ & 0x00000002) != 0)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(2, getAlgorithm());
}
for (int i = 0; i < initializationBinding_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(3, initializationBinding_.get(i));
}
for (int i = 0; i < updateBinding_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(4, updateBinding_.get(i));
}
// Include bytes for unknown fields carried through from parsing.
size += getUnknownFields().getSerializedSize();
memoizedSize = size;
return size;
}
/**
 * Value equality: two TrainingInfoProto messages are equal when the presence
 * and value of each singular field, the contents of each repeated field, and
 * their unknown fields all match.
 */
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof onnx.Onnx.TrainingInfoProto)) {
return super.equals(obj);
}
onnx.Onnx.TrainingInfoProto other = (onnx.Onnx.TrainingInfoProto) obj;
// Optional message fields: presence must match before values are compared.
if (hasInitialization() != other.hasInitialization()) return false;
if (hasInitialization()) {
if (!getInitialization()
.equals(other.getInitialization())) return false;
}
if (hasAlgorithm() != other.hasAlgorithm()) return false;
if (hasAlgorithm()) {
if (!getAlgorithm()
.equals(other.getAlgorithm())) return false;
}
// Repeated fields compare element-wise via List.equals.
if (!getInitializationBindingList()
.equals(other.getInitializationBindingList())) return false;
if (!getUpdateBindingList()
.equals(other.getUpdateBindingList())) return false;
if (!getUnknownFields().equals(other.getUnknownFields())) return false;
return true;
}
/**
 * Hash code consistent with equals(): mixes the descriptor, each set or
 * non-empty field (tagged by its field number), and the unknown fields.
 * The result is memoized (0 is reserved for "not yet computed").
 */
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
// 19/37/53/29 are the protoc-generated mixing multipliers; they must stay
// in sync with every other generated message in this file.
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (hasInitialization()) {
hash = (37 * hash) + INITIALIZATION_FIELD_NUMBER;
hash = (53 * hash) + getInitialization().hashCode();
}
if (hasAlgorithm()) {
hash = (37 * hash) + ALGORITHM_FIELD_NUMBER;
hash = (53 * hash) + getAlgorithm().hashCode();
}
if (getInitializationBindingCount() > 0) {
hash = (37 * hash) + INITIALIZATION_BINDING_FIELD_NUMBER;
hash = (53 * hash) + getInitializationBindingList().hashCode();
}
if (getUpdateBindingCount() > 0) {
hash = (37 * hash) + UPDATE_BINDING_FIELD_NUMBER;
hash = (53 * hash) + getUpdateBindingList().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
// ---------------------------------------------------------------------------
// Static parsing entry points. All overloads delegate to the shared PARSER;
// the *FromIOException helpers translate parser failures into IOExceptions.
// ---------------------------------------------------------------------------
// Parse from in-memory buffers (ByteBuffer / ByteString / byte[]); throws
// InvalidProtocolBufferException on malformed input.
public static onnx.Onnx.TrainingInfoProto parseFrom(
java.nio.ByteBuffer data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static onnx.Onnx.TrainingInfoProto parseFrom(
java.nio.ByteBuffer data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static onnx.Onnx.TrainingInfoProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static onnx.Onnx.TrainingInfoProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static onnx.Onnx.TrainingInfoProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static onnx.Onnx.TrainingInfoProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
// Parse a single message that occupies the remainder of the stream.
public static onnx.Onnx.TrainingInfoProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static onnx.Onnx.TrainingInfoProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
// Parse a varint-length-prefixed message (for streams holding several
// messages back to back).
public static onnx.Onnx.TrainingInfoProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static onnx.Onnx.TrainingInfoProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
// Parse from an already-wrapped CodedInputStream.
public static onnx.Onnx.TrainingInfoProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static onnx.Onnx.TrainingInfoProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
// Creates a fresh Builder for this message type.
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
// Creates a Builder pre-populated with the fields of the given prototype.
public static Builder newBuilder(onnx.Onnx.TrainingInfoProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
// Converts this message to a Builder; the default instance yields an empty
// Builder without a merge, avoiding needless copying.
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
// Internal factory used by the runtime to create parented builders
// (e.g. for nested-builder change propagation).
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
*
* Training information
* TrainingInfoProto stores information for training a model.
* In particular, this defines two functionalities: an initialization-step
* and a training-algorithm-step. Initialization resets the model
* back to its original state as if no training has been performed.
* Training algorithm improves the model based on input data.
*
* The semantics of the initialization-step is that the initializers
* in ModelProto.graph and in TrainingInfoProto.algorithm are first
* initialized as specified by the initializers in the graph, and then
* updated by the "initialization_binding" in every instance in
* ModelProto.training_info.
*
* The field "algorithm" defines a computation graph which represents a
* training algorithm's step. After the execution of a
* TrainingInfoProto.algorithm, the initializers specified by "update_binding"
* may be immediately updated. If the targeted training algorithm contains
* consecutive update steps (such as block coordinate descent methods),
* the user needs to create a TrainingInfoProto for each step.
*
*
* Protobuf type {@code onnx.TrainingInfoProto}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessageV3.Builder implements
// @@protoc_insertion_point(builder_implements:onnx.TrainingInfoProto)
onnx.Onnx.TrainingInfoProtoOrBuilder {
// Returns the protobuf descriptor for onnx.TrainingInfoProto.
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return onnx.Onnx.internal_static_onnx_TrainingInfoProto_descriptor;
}
// Wires the descriptor's fields to the generated message/builder classes
// for reflection-based access.
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return onnx.Onnx.internal_static_onnx_TrainingInfoProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
onnx.Onnx.TrainingInfoProto.class, onnx.Onnx.TrainingInfoProto.Builder.class);
}
// Construct using onnx.Onnx.TrainingInfoProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
// Parented constructor: changes in this builder are reported to the parent.
private Builder(
com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
// Eagerly creates nested field builders when the runtime requires it
// (alwaysUseFieldBuilders is a runtime-wide flag); otherwise they are
// created lazily on first access.
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
getInitializationFieldBuilder();
getAlgorithmFieldBuilder();
getInitializationBindingFieldBuilder();
getUpdateBindingFieldBuilder();
}
}
/**
 * Resets every field of this builder to its default state, disposing any
 * nested single-field builders and clearing the repeated-field lists.
 *
 * @return this builder, for chaining.
 */
@java.lang.Override
public Builder clear() {
super.clear();
bitField0_ = 0;
initialization_ = null;
if (initializationBuilder_ != null) {
initializationBuilder_.dispose();
initializationBuilder_ = null;
}
algorithm_ = null;
if (algorithmBuilder_ != null) {
algorithmBuilder_.dispose();
algorithmBuilder_ = null;
}
// Repeated fields: clear either the plain list or the repeated-field
// builder, whichever representation is currently in use.
if (initializationBindingBuilder_ == null) {
initializationBinding_ = java.util.Collections.emptyList();
} else {
initializationBinding_ = null;
initializationBindingBuilder_.clear();
}
// Bit 2 (0x04) tracks mutability/ownership of initializationBinding_.
bitField0_ = (bitField0_ & ~0x00000004);
if (updateBindingBuilder_ == null) {
updateBinding_ = java.util.Collections.emptyList();
} else {
updateBinding_ = null;
updateBindingBuilder_.clear();
}
// Bit 3 (0x08) tracks mutability/ownership of updateBinding_.
bitField0_ = (bitField0_ & ~0x00000008);
return this;
}
// Descriptor for the message type this builder produces.
@java.lang.Override
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return onnx.Onnx.internal_static_onnx_TrainingInfoProto_descriptor;
}
@java.lang.Override
public onnx.Onnx.TrainingInfoProto getDefaultInstanceForType() {
return onnx.Onnx.TrainingInfoProto.getDefaultInstance();
}
/**
 * Builds the message, throwing if required fields are missing.
 * (TrainingInfoProto has none, so this never throws in practice.)
 */
@java.lang.Override
public onnx.Onnx.TrainingInfoProto build() {
onnx.Onnx.TrainingInfoProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
/**
 * Builds the message without checking required fields. Repeated fields are
 * transferred first, then singular fields for any presence bits that are set.
 */
@java.lang.Override
public onnx.Onnx.TrainingInfoProto buildPartial() {
onnx.Onnx.TrainingInfoProto result = new onnx.Onnx.TrainingInfoProto(this);
buildPartialRepeatedFields(result);
if (bitField0_ != 0) { buildPartial0(result); }
onBuilt();
return result;
}
// Transfers the repeated fields into the result message. When no repeated
// field builder exists, the builder's own list is frozen (wrapped
// unmodifiable) and handed over; clearing the mutability bit means any later
// builder mutation must first copy the list, so the message stays immutable.
private void buildPartialRepeatedFields(onnx.Onnx.TrainingInfoProto result) {
if (initializationBindingBuilder_ == null) {
if (((bitField0_ & 0x00000004) != 0)) {
initializationBinding_ = java.util.Collections.unmodifiableList(initializationBinding_);
bitField0_ = (bitField0_ & ~0x00000004);
}
result.initializationBinding_ = initializationBinding_;
} else {
result.initializationBinding_ = initializationBindingBuilder_.build();
}
if (updateBindingBuilder_ == null) {
if (((bitField0_ & 0x00000008) != 0)) {
updateBinding_ = java.util.Collections.unmodifiableList(updateBinding_);
bitField0_ = (bitField0_ & ~0x00000008);
}
result.updateBinding_ = updateBinding_;
} else {
result.updateBinding_ = updateBindingBuilder_.build();
}
}
// Copies the singular message fields (initialization, algorithm) into the
// result for each presence bit that is set, preferring the live nested
// builder's built value over the cached field, and ORs the corresponding
// presence bits into the result's bitField0_.
private void buildPartial0(onnx.Onnx.TrainingInfoProto result) {
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) != 0)) {
result.initialization_ = initializationBuilder_ == null
? initialization_
: initializationBuilder_.build();
to_bitField0_ |= 0x00000001;
}
if (((from_bitField0_ & 0x00000002) != 0)) {
result.algorithm_ = algorithmBuilder_ == null
? algorithm_
: algorithmBuilder_.build();
to_bitField0_ |= 0x00000002;
}
result.bitField0_ |= to_bitField0_;
}
// ---------------------------------------------------------------------------
// Boilerplate overrides that simply delegate to GeneratedMessageV3.Builder;
// generated so subclass return types stay covariant (Builder, not the
// superclass type).
// ---------------------------------------------------------------------------
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
com.google.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
com.google.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
com.google.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
com.google.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
com.google.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
// Dispatches to the strongly-typed merge when the argument is a
// TrainingInfoProto; otherwise falls back to reflective field-by-field merge.
@java.lang.Override
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof onnx.Onnx.TrainingInfoProto) {
return mergeFrom((onnx.Onnx.TrainingInfoProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
/**
 * Merges the set fields of {@code other} into this builder: singular message
 * fields are recursively merged, repeated fields are appended. Merging the
 * default instance is a no-op.
 *
 * @return this builder, for chaining.
 */
public Builder mergeFrom(onnx.Onnx.TrainingInfoProto other) {
if (other == onnx.Onnx.TrainingInfoProto.getDefaultInstance()) return this;
if (other.hasInitialization()) {
mergeInitialization(other.getInitialization());
}
if (other.hasAlgorithm()) {
mergeAlgorithm(other.getAlgorithm());
}
// initialization_binding: when our list is empty we alias other's
// (immutable) list and clear the mutability bit, deferring the copy until
// a mutation happens; otherwise we copy-on-write and append.
if (initializationBindingBuilder_ == null) {
if (!other.initializationBinding_.isEmpty()) {
if (initializationBinding_.isEmpty()) {
initializationBinding_ = other.initializationBinding_;
bitField0_ = (bitField0_ & ~0x00000004);
} else {
ensureInitializationBindingIsMutable();
initializationBinding_.addAll(other.initializationBinding_);
}
onChanged();
}
} else {
// A repeated-field builder exists: if it is empty, discard it and alias
// other's list (recreating the builder only if the runtime forces field
// builders); otherwise append through the builder.
if (!other.initializationBinding_.isEmpty()) {
if (initializationBindingBuilder_.isEmpty()) {
initializationBindingBuilder_.dispose();
initializationBindingBuilder_ = null;
initializationBinding_ = other.initializationBinding_;
bitField0_ = (bitField0_ & ~0x00000004);
initializationBindingBuilder_ =
com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
getInitializationBindingFieldBuilder() : null;
} else {
initializationBindingBuilder_.addAllMessages(other.initializationBinding_);
}
}
}
// update_binding: same aliasing/copy-on-write strategy as above.
if (updateBindingBuilder_ == null) {
if (!other.updateBinding_.isEmpty()) {
if (updateBinding_.isEmpty()) {
updateBinding_ = other.updateBinding_;
bitField0_ = (bitField0_ & ~0x00000008);
} else {
ensureUpdateBindingIsMutable();
updateBinding_.addAll(other.updateBinding_);
}
onChanged();
}
} else {
if (!other.updateBinding_.isEmpty()) {
if (updateBindingBuilder_.isEmpty()) {
updateBindingBuilder_.dispose();
updateBindingBuilder_ = null;
updateBinding_ = other.updateBinding_;
bitField0_ = (bitField0_ & ~0x00000008);
updateBindingBuilder_ =
com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
getUpdateBindingFieldBuilder() : null;
} else {
updateBindingBuilder_.addAllMessages(other.updateBinding_);
}
}
}
this.mergeUnknownFields(other.getUnknownFields());
onChanged();
return this;
}
// No required fields in TrainingInfoProto, so a builder is always
// considered initialized.
@java.lang.Override
public final boolean isInitialized() {
return true;
}
/**
 * Parses wire-format data from the stream into this builder, dispatching on
 * each field tag (tag = field_number &lt;&lt; 3 | wire_type): 10 and 18 are the
 * singular message fields, 26 and 34 append to the repeated fields, and any
 * unrecognized tag is preserved as an unknown field. Tag 0 marks end of input.
 *
 * @throws java.io.IOException          on stream failure or malformed data.
 * @throws java.lang.NullPointerException if extensionRegistry is null.
 */
@java.lang.Override
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 10: {
// Field 1: initialization (GraphProto), merged into its nested builder.
input.readMessage(
getInitializationFieldBuilder().getBuilder(),
extensionRegistry);
bitField0_ |= 0x00000001;
break;
} // case 10
case 18: {
// Field 2: algorithm (GraphProto).
input.readMessage(
getAlgorithmFieldBuilder().getBuilder(),
extensionRegistry);
bitField0_ |= 0x00000002;
break;
} // case 18
case 26: {
// Field 3: one initialization_binding entry, appended.
onnx.Onnx.StringStringEntryProto m =
input.readMessage(
onnx.Onnx.StringStringEntryProto.PARSER,
extensionRegistry);
if (initializationBindingBuilder_ == null) {
ensureInitializationBindingIsMutable();
initializationBinding_.add(m);
} else {
initializationBindingBuilder_.addMessage(m);
}
break;
} // case 26
case 34: {
// Field 4: one update_binding entry, appended.
onnx.Onnx.StringStringEntryProto m =
input.readMessage(
onnx.Onnx.StringStringEntryProto.PARSER,
extensionRegistry);
if (updateBindingBuilder_ == null) {
ensureUpdateBindingIsMutable();
updateBinding_.add(m);
} else {
updateBindingBuilder_.addMessage(m);
}
break;
} // case 34
default: {
if (!super.parseUnknownField(input, extensionRegistry, tag)) {
done = true; // was an endgroup tag
}
break;
} // default:
} // switch (tag)
} // while (!done)
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.unwrapIOException();
} finally {
// Notify listeners even when parsing aborts partway through.
onChanged();
} // finally
return this;
}
// Presence/mutability bits: 0x01 initialization, 0x02 algorithm,
// 0x04 initializationBinding_ mutable, 0x08 updateBinding_ mutable.
private int bitField0_;
// Cached value for field 1; used when no nested builder has been created.
private onnx.Onnx.GraphProto initialization_;
// Lazily-created nested builder for field 1; takes precedence over
// initialization_ once it exists.
private com.google.protobuf.SingleFieldBuilderV3<
onnx.Onnx.GraphProto, onnx.Onnx.GraphProto.Builder, onnx.Onnx.GraphProtoOrBuilder> initializationBuilder_;
/**
 * This field describes a graph to compute the initial tensors
 * upon starting the training process. Initialization graph has no input
 * and can have multiple outputs. Usually, trainable tensors in neural
 * networks are randomly initialized. To achieve that, for each tensor,
 * the user can put a random number operator such as RandomNormal or
 * RandomUniform in TrainingInfoProto.initialization.node and assign its
 * random output to the specific tensor using "initialization_binding".
 * This graph can also set the initializers in "algorithm" in the same
 * TrainingInfoProto; a use case is resetting the number of training
 * iteration to zero.
 *
 * By default, this field is an empty graph and its evaluation does not
 * produce any output. Thus, no initializer would be changed by default.
 *
 * optional .onnx.GraphProto initialization = 1;
 * @return Whether the initialization field is set.
 */
public boolean hasInitialization() {
// Presence bit 0 is set whenever initialization has been assigned or parsed.
return ((bitField0_ & 0x00000001) != 0);
}
/**
 * This field represents a training algorithm step. Given required inputs,
 * it computes outputs to update initializers in its own or inference graph's
 * initializer lists. In general, this field contains loss node, gradient node,
 * optimizer node, increment of iteration count.
 *
 * An execution of the training algorithm step is performed by executing the
 * graph obtained by combining the inference graph (namely "ModelProto.graph")
 * and the "algorithm" graph. That is, the actual
 * input/initializer/output/node/value_info/sparse_initializer list of
 * the training graph is the concatenation of
 * "ModelProto.graph.input/initializer/output/node/value_info/sparse_initializer"
 * and "algorithm.input/initializer/output/node/value_info/sparse_initializer"
 * in that order. This combined graph must satisfy the normal ONNX conditions.
 * Now, let's provide a visualization of graph combination for clarity.
 * Let the inference graph (i.e., "ModelProto.graph") be
 * tensor_a, tensor_b -> MatMul -> tensor_c -> Sigmoid -> tensor_d
 * and the "algorithm" graph be
 * tensor_d -> Add -> tensor_e
 * The combination process results
 * tensor_a, tensor_b -> MatMul -> tensor_c -> Sigmoid -> tensor_d -> Add -> tensor_e
 *
 * Notice that an input of a node in the "algorithm" graph may reference the
 * output of a node in the inference graph (but not the other way round). Also, inference
 * node cannot reference inputs of "algorithm". With these restrictions, inference graph
 * can always be run independently without training information.
 *
 * By default, this field is an empty graph and its evaluation does not
 * produce any output. Evaluating the default training step never
 * update any initializers.
 *
 * optional .onnx.GraphProto algorithm = 2;
 * @return Whether the algorithm field is set.
 */
public boolean hasAlgorithm() {
// Bit 1 of bitField0_ tracks explicit presence of "algorithm".
return ((bitField0_ & 0x00000002) != 0);
}
/**
 * This field specifies the bindings from the outputs of "initialization" to
 * some initializers in "ModelProto.graph.initializer" and
 * the "algorithm.initializer" in the same TrainingInfoProto.
 * See "update_binding" below for details.
 *
 * By default, this field is empty and no initializer would be changed
 * by the execution of "initialization".
 *
 * repeated .onnx.StringStringEntryProto initialization_binding = 3;
 * @return The number of initialization_binding entries currently held,
 *         whether they live in the plain list or in the nested builder.
 */
public int getInitializationBindingCount() {
// The nested repeated-field builder, once created, owns the elements;
// until then the plain list initializationBinding_ is authoritative.
if (initializationBindingBuilder_ == null) {
return initializationBinding_.size();
} else {
return initializationBindingBuilder_.getCount();
}
}
/**
 * This field specifies the bindings from the outputs of "initialization" to
 * some initializers in "ModelProto.graph.initializer" and
 * the "algorithm.initializer" in the same TrainingInfoProto.
 * See "update_binding" below for details.
 *
 * By default, this field is empty and no initializer would be changed
 * by the execution of "initialization".
 *
 * repeated .onnx.StringStringEntryProto initialization_binding = 3;
 * @param index position of the entry to overwrite
 * @param value replacement entry; must not be null
 * @return this builder, for call chaining
 * @throws NullPointerException if {@code value} is null
 */
public Builder setInitializationBinding(
int index, onnx.Onnx.StringStringEntryProto value) {
if (initializationBindingBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
// Copy-on-write: ensure we are not mutating a list shared with a
// previously built message before setting in place.
ensureInitializationBindingIsMutable();
initializationBinding_.set(index, value);
onChanged();
} else {
// Builder path: the nested builder performs its own null check.
initializationBindingBuilder_.setMessage(index, value);
}
return this;
}
/**
 * This field specifies the bindings from the outputs of "initialization" to
 * some initializers in "ModelProto.graph.initializer" and
 * the "algorithm.initializer" in the same TrainingInfoProto.
 * See "update_binding" below for details.
 *
 * By default, this field is empty and no initializer would be changed
 * by the execution of "initialization".
 *
 * repeated .onnx.StringStringEntryProto initialization_binding = 3;
 * @param value entry to append; must not be null
 * @return this builder, for call chaining
 * @throws NullPointerException if {@code value} is null
 */
public Builder addInitializationBinding(onnx.Onnx.StringStringEntryProto value) {
if (initializationBindingBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
// Copy-on-write: make the backing list mutable before appending.
ensureInitializationBindingIsMutable();
initializationBinding_.add(value);
onChanged();
} else {
// Builder path: the nested builder performs its own null check.
initializationBindingBuilder_.addMessage(value);
}
return this;
}
/**
 * Inserts an initialization_binding entry at the given position.
 *
 * This field specifies the bindings from the outputs of "initialization" to
 * some initializers in "ModelProto.graph.initializer" and
 * the "algorithm.initializer" in the same TrainingInfoProto.
 * See "update_binding" below for details.
 *
 * By default, this field is empty and no initializer would be changed
 * by the execution of "initialization".
 *
 * repeated .onnx.StringStringEntryProto initialization_binding = 3;
 * @param index position at which to insert
 * @param value entry to insert; must not be null
 * @return this builder, for call chaining
 * @throws NullPointerException if {@code value} is null
 */
public Builder addInitializationBinding(
int index, onnx.Onnx.StringStringEntryProto value) {
// When the nested builder exists it owns the elements; delegate and bail.
if (initializationBindingBuilder_ != null) {
initializationBindingBuilder_.addMessage(index, value);
return this;
}
if (value == null) {
throw new NullPointerException();
}
// Copy-on-write: make the backing list mutable before inserting.
ensureInitializationBindingIsMutable();
initializationBinding_.add(index, value);
onChanged();
return this;
}
/**
 * This field specifies the bindings from the outputs of "initialization" to
 * some initializers in "ModelProto.graph.initializer" and
 * the "algorithm.initializer" in the same TrainingInfoProto.
 * See "update_binding" below for details.
 *
 * By default, this field is empty and no initializer would be changed
 * by the execution of "initialization".
 *
 * repeated .onnx.StringStringEntryProto initialization_binding = 3;
 * @param index position of the entry to obtain a builder for
 * @return a nested builder for the entry at {@code index}; mutations on it
 *         are reflected in this message builder
 */
public onnx.Onnx.StringStringEntryProto.Builder getInitializationBindingBuilder(
int index) {
// Forces creation of the repeated-field builder, migrating any elements
// from the plain list into it.
return getInitializationBindingFieldBuilder().getBuilder(index);
}
/**
 * Returns a read-only view of the initialization_binding entry at the
 * given position without forcing builder creation.
 *
 * This field specifies the bindings from the outputs of "initialization" to
 * some initializers in "ModelProto.graph.initializer" and
 * the "algorithm.initializer" in the same TrainingInfoProto.
 * See "update_binding" below for details.
 *
 * By default, this field is empty and no initializer would be changed
 * by the execution of "initialization".
 *
 * repeated .onnx.StringStringEntryProto initialization_binding = 3;
 * @param index position of the entry to view
 * @return the entry (or its builder view) at {@code index}
 */
public onnx.Onnx.StringStringEntryProtoOrBuilder getInitializationBindingOrBuilder(
int index) {
// Read from whichever store currently owns the elements.
return (initializationBindingBuilder_ == null)
? initializationBinding_.get(index)
: initializationBindingBuilder_.getMessageOrBuilder(index);
}
/**
*
* This field specifies the bindings from the outputs of "initialization" to
* some initializers in "ModelProto.graph.initializer" and
* the "algorithm.initializer" in the same TrainingInfoProto.
* See "update_binding" below for details.
*
* By default, this field is empty and no initializer would be changed
* by the execution of "initialization".
*
* This field specifies the bindings from the outputs of "initialization" to
* some initializers in "ModelProto.graph.initializer" and
* the "algorithm.initializer" in the same TrainingInfoProto.
* See "update_binding" below for details.
*
* By default, this field is empty and no initializer would be changed
* by the execution of "initialization".
*
* This field specifies the bindings from the outputs of "initialization" to
* some initializers in "ModelProto.graph.initializer" and
* the "algorithm.initializer" in the same TrainingInfoProto.
* See "update_binding" below for details.
*
* By default, this field is empty and no initializer would be changed
* by the execution of "initialization".
*
* This field specifies the bindings from the outputs of "initialization" to
* some initializers in "ModelProto.graph.initializer" and
* the "algorithm.initializer" in the same TrainingInfoProto.
* See "update_binding" below for details.
*
* By default, this field is empty and no initializer would be changed
* by the execution of "initialization".
*
* Gradient-based training is usually an iterative procedure. In one gradient
* descent iteration, we apply
*
* x = x - r * g
*
* where "x" is the optimized tensor, "r" stands for learning rate, and "g" is
* gradient of "x" with respect to a chosen loss. To avoid adding assignments
* into the training graph, we split the update equation into
*
* y = x - r * g
* x = y
*
* The user needs to save "y = x - r * g" into TrainingInfoProto.algorithm. To
* tell that "y" should be assigned to "x", the field "update_binding" may
* contain a key-value pair of strings, "x" (key of StringStringEntryProto)
* and "y" (value of StringStringEntryProto).
* For a neural network with multiple trainable (mutable) tensors, there can
* be multiple key-value pairs in "update_binding".
*
* The initializers appears as keys in "update_binding" are considered
* mutable variables. This implies some behaviors
* as described below.
*
* 1. We have only unique keys in all "update_binding"s so that two
* variables may not have the same name. This ensures that one
* variable is assigned up to once.
* 2. The keys must appear in names of "ModelProto.graph.initializer" or
* "TrainingInfoProto.algorithm.initializer".
* 3. The values must be output names of "algorithm" or "ModelProto.graph.output".
* 4. Mutable variables are initialized to the value specified by the
* corresponding initializer, and then potentially updated by
* "initializer_binding"s and "update_binding"s in "TrainingInfoProto"s.
*
* This field usually contains names of trainable tensors
* (in ModelProto.graph), optimizer states such as momentums in advanced
* stochastic gradient methods (in TrainingInfoProto.graph),
* and number of training iterations (in TrainingInfoProto.graph).
*
* By default, this field is empty and no initializer would be changed
* by the execution of "algorithm".
*
* Gradient-based training is usually an iterative procedure. In one gradient
* descent iteration, we apply
*
* x = x - r * g
*
* where "x" is the optimized tensor, "r" stands for learning rate, and "g" is
* gradient of "x" with respect to a chosen loss. To avoid adding assignments
* into the training graph, we split the update equation into
*
* y = x - r * g
* x = y
*
* The user needs to save "y = x - r * g" into TrainingInfoProto.algorithm. To
* tell that "y" should be assigned to "x", the field "update_binding" may
* contain a key-value pair of strings, "x" (key of StringStringEntryProto)
* and "y" (value of StringStringEntryProto).
* For a neural network with multiple trainable (mutable) tensors, there can
* be multiple key-value pairs in "update_binding".
*
* The initializers appears as keys in "update_binding" are considered
* mutable variables. This implies some behaviors
* as described below.
*
* 1. We have only unique keys in all "update_binding"s so that two
* variables may not have the same name. This ensures that one
* variable is assigned up to once.
* 2. The keys must appear in names of "ModelProto.graph.initializer" or
* "TrainingInfoProto.algorithm.initializer".
* 3. The values must be output names of "algorithm" or "ModelProto.graph.output".
* 4. Mutable variables are initialized to the value specified by the
* corresponding initializer, and then potentially updated by
* "initializer_binding"s and "update_binding"s in "TrainingInfoProto"s.
*
* This field usually contains names of trainable tensors
* (in ModelProto.graph), optimizer states such as momentums in advanced
* stochastic gradient methods (in TrainingInfoProto.graph),
* and number of training iterations (in TrainingInfoProto.graph).
*
* By default, this field is empty and no initializer would be changed
* by the execution of "algorithm".
*
*
* repeated .onnx.StringStringEntryProto update_binding = 4;
*/
public int getUpdateBindingCount() {
  // The element list is owned either by this builder directly or by the
  // nested repeated-field builder, never both; count from whichever is live.
  return updateBindingBuilder_ == null
      ? updateBinding_.size()
      : updateBindingBuilder_.getCount();
}
/**
 * Replaces the "update_binding" entry at {@code index}.
 *
 * "update_binding" holds key-value string pairs binding outputs of
 * "algorithm" back to mutable initializers: the key names an initializer of
 * "ModelProto.graph" or "TrainingInfoProto.algorithm", the value names an
 * output of "algorithm" or "ModelProto.graph" whose result is assigned to
 * that initializer after a training iteration (the split form of
 * "x = x - r * g" written as "y = x - r * g; x = y"). Keys are unique across
 * all "update_binding"s so each variable is assigned at most once. Empty by
 * default, meaning no initializer is changed by running "algorithm".
 *
 * repeated .onnx.StringStringEntryProto update_binding = 4;
 */
public Builder setUpdateBinding(
    int index, onnx.Onnx.StringStringEntryProto value) {
  if (updateBindingBuilder_ != null) {
    // A nested builder owns the list; delegate so its views stay in sync.
    updateBindingBuilder_.setMessage(index, value);
    return this;
  }
  if (value == null) {
    throw new NullPointerException();
  }
  ensureUpdateBindingIsMutable();
  updateBinding_.set(index, value);
  onChanged();
  return this;
}
/**
 * Appends one "update_binding" entry.
 *
 * Each entry binds a mutable initializer name (key, declared in
 * "ModelProto.graph.initializer" or "TrainingInfoProto.algorithm.initializer")
 * to an output name of "algorithm" or "ModelProto.graph" (value) that is
 * assigned back to it after a training iteration. Keys are unique across all
 * "update_binding"s. Empty by default — no initializer is changed by the
 * execution of "algorithm".
 *
 * repeated .onnx.StringStringEntryProto update_binding = 4;
 */
public Builder addUpdateBinding(onnx.Onnx.StringStringEntryProto value) {
  if (updateBindingBuilder_ != null) {
    // Nested builder active: append through it so builder views stay valid.
    updateBindingBuilder_.addMessage(value);
    return this;
  }
  if (value == null) {
    throw new NullPointerException();
  }
  ensureUpdateBindingIsMutable();
  updateBinding_.add(value);
  onChanged();
  return this;
}
/**
 * Inserts one "update_binding" entry at {@code index}, shifting later
 * entries right.
 *
 * Entries bind mutable initializer names (keys) to output names of
 * "algorithm" or "ModelProto.graph" (values) that are assigned back to them
 * after each training iteration; keys are unique across all
 * "update_binding"s. Empty by default.
 *
 * repeated .onnx.StringStringEntryProto update_binding = 4;
 */
public Builder addUpdateBinding(
    int index, onnx.Onnx.StringStringEntryProto value) {
  if (updateBindingBuilder_ != null) {
    // Nested builder owns the list; insert through it.
    updateBindingBuilder_.addMessage(index, value);
    return this;
  }
  if (value == null) {
    throw new NullPointerException();
  }
  ensureUpdateBindingIsMutable();
  updateBinding_.add(index, value);
  onChanged();
  return this;
}
/**
 * Returns a mutable builder view of the "update_binding" entry at
 * {@code index}. Forces creation of the nested repeated-field builder, which
 * takes ownership of the element list from this Builder.
 *
 * "update_binding" binds mutable initializer names (keys, declared in
 * "ModelProto.graph.initializer" or "TrainingInfoProto.algorithm.initializer")
 * to output names of "algorithm" or "ModelProto.graph" (values) assigned back
 * to them after each training iteration; keys are unique across all
 * "update_binding"s. Empty by default — no initializer is changed by the
 * execution of "algorithm".
 *
 * repeated .onnx.StringStringEntryProto update_binding = 4;
 */
public onnx.Onnx.StringStringEntryProto.Builder getUpdateBindingBuilder(
    int index) {
  return getUpdateBindingFieldBuilder().getBuilder(index);
}
/**
 * Returns a read-only view of the "update_binding" entry at {@code index} —
 * a live builder if one exists for it, otherwise the message itself. Does
 * not force creation of the nested repeated-field builder.
 *
 * "update_binding" binds mutable initializer names (keys) to output names of
 * "algorithm" or "ModelProto.graph" (values) assigned back to them after
 * each training iteration; keys are unique across all "update_binding"s.
 * Empty by default.
 *
 * repeated .onnx.StringStringEntryProto update_binding = 4;
 */
public onnx.Onnx.StringStringEntryProtoOrBuilder getUpdateBindingOrBuilder(
    int index) {
  return updateBindingBuilder_ == null
      ? updateBinding_.get(index)
      : updateBindingBuilder_.getMessageOrBuilder(index);
}
/**
 * Returns builder views for every "update_binding" entry. Forces creation of
 * the nested repeated-field builder, which takes ownership of the element
 * list from this Builder.
 *
 * "update_binding" binds mutable initializer names (keys, declared in
 * "ModelProto.graph.initializer" or "TrainingInfoProto.algorithm.initializer")
 * to output names of "algorithm" or "ModelProto.graph" (values) assigned back
 * to them after each training iteration; keys are unique across all
 * "update_binding"s. Empty by default — no initializer is changed by the
 * execution of "algorithm".
 *
 * repeated .onnx.StringStringEntryProto update_binding = 4;
 */
public java.util.List<onnx.Onnx.StringStringEntryProto.Builder>
    getUpdateBindingBuilderList() {
  // Fix: the generic parameter had been stripped, leaving a raw
  // java.util.List return type; restore the parameterized type emitted by
  // protoc so callers get typed builders without casts.
  return getUpdateBindingFieldBuilder().getBuilderList();
}
// Lazily creates the RepeatedFieldBuilderV3 managing the repeated
// "update_binding" field. Once created, the builder owns the element list,
// so the plain backing list is released (set to null) to avoid double state.
private com.google.protobuf.RepeatedFieldBuilderV3<
onnx.Onnx.StringStringEntryProto, onnx.Onnx.StringStringEntryProto.Builder, onnx.Onnx.StringStringEntryProtoOrBuilder>
getUpdateBindingFieldBuilder() {
if (updateBindingBuilder_ == null) {
// bit 0x00000008 records whether update_binding has been explicitly set
updateBindingBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3<
onnx.Onnx.StringStringEntryProto, onnx.Onnx.StringStringEntryProto.Builder, onnx.Onnx.StringStringEntryProtoOrBuilder>(
updateBinding_,
((bitField0_ & 0x00000008) != 0),
getParentForChildren(),
isClean());
updateBinding_ = null;
}
return updateBindingBuilder_;
}
/** Replaces this builder's unknown-field set; pure delegation to the generated base class. */
@java.lang.Override
public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet fields) {
  return super.setUnknownFields(fields);
}
/** Merges the given unknown fields into this builder's set; pure delegation to the base class. */
@java.lang.Override
public final Builder mergeUnknownFields(final com.google.protobuf.UnknownFieldSet fields) {
  return super.mergeUnknownFields(fields);
}
// @@protoc_insertion_point(builder_scope:onnx.TrainingInfoProto)
}
// @@protoc_insertion_point(class_scope:onnx.TrainingInfoProto)
// Shared immutable default (all-fields-unset) instance of TrainingInfoProto.
private static final onnx.Onnx.TrainingInfoProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new onnx.Onnx.TrainingInfoProto();
}
/** Returns the shared immutable default instance. */
public static onnx.Onnx.TrainingInfoProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
/**
 * Wire-format parser for TrainingInfoProto.
 *
 * NOTE(review): the type arguments on Parser/AbstractParser were missing in
 * this copy (stripped like HTML tags during extraction); restored to the
 * standard protoc-generated form.
 *
 * @deprecated emitted by protoc for compatibility; prefer {@code parser()}.
 */
@java.lang.Deprecated public static final com.google.protobuf.Parser<TrainingInfoProto>
    PARSER = new com.google.protobuf.AbstractParser<TrainingInfoProto>() {
  @java.lang.Override
  public TrainingInfoProto parsePartialFrom(
      com.google.protobuf.CodedInputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    Builder builder = newBuilder();
    try {
      builder.mergeFrom(input, extensionRegistry);
    } catch (com.google.protobuf.InvalidProtocolBufferException e) {
      // Attach the partially parsed message so callers can inspect it.
      throw e.setUnfinishedMessage(builder.buildPartial());
    } catch (com.google.protobuf.UninitializedMessageException e) {
      throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
    } catch (java.io.IOException e) {
      throw new com.google.protobuf.InvalidProtocolBufferException(e)
          .setUnfinishedMessage(builder.buildPartial());
    }
    return builder.buildPartial();
  }
};
/**
 * Returns the shared parser for TrainingInfoProto.
 * NOTE(review): restored the missing type argument on the return type.
 */
public static com.google.protobuf.Parser<TrainingInfoProto> parser() {
  return PARSER;
}
/**
 * Returns the parser for this message type.
 * NOTE(review): restored the missing type argument on the return type.
 */
@java.lang.Override
public com.google.protobuf.Parser<TrainingInfoProto> getParserForType() {
  return PARSER;
}
/** Returns the shared default (all-fields-unset) TrainingInfoProto instance. */
@java.lang.Override
public onnx.Onnx.TrainingInfoProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
public interface ModelProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:onnx.ModelProto)
com.google.protobuf.MessageOrBuilder {
/**
*
* The version of the IR this model targets. See Version enum above.
* This field MUST be present.
*
*
* optional int64 ir_version = 1;
* @return Whether the irVersion field is set.
*/
boolean hasIrVersion();
/**
*
* The version of the IR this model targets. See Version enum above.
* This field MUST be present.
*
*
* optional int64 ir_version = 1;
* @return The irVersion.
*/
long getIrVersion();
/**
*
* The OperatorSets this model relies on.
* All ModelProtos MUST have at least one entry that
* specifies which version of the ONNX OperatorSet is
* being imported.
*
* All nodes in the ModelProto's graph will bind against the operator
* with the same-domain/same-op_type operator with the HIGHEST version
* in the referenced operator sets.
*
* The OperatorSets this model relies on.
* All ModelProtos MUST have at least one entry that
* specifies which version of the ONNX OperatorSet is
* being imported.
*
* All nodes in the ModelProto's graph will bind against the operator
* with the same-domain/same-op_type operator with the HIGHEST version
* in the referenced operator sets.
*
* The OperatorSets this model relies on.
* All ModelProtos MUST have at least one entry that
* specifies which version of the ONNX OperatorSet is
* being imported.
*
* All nodes in the ModelProto's graph will bind against the operator
* with the same-domain/same-op_type operator with the HIGHEST version
* in the referenced operator sets.
*
* The OperatorSets this model relies on.
* All ModelProtos MUST have at least one entry that
* specifies which version of the ONNX OperatorSet is
* being imported.
*
* All nodes in the ModelProto's graph will bind against the operator
* with the same-domain/same-op_type operator with the HIGHEST version
* in the referenced operator sets.
*
* The OperatorSets this model relies on.
* All ModelProtos MUST have at least one entry that
* specifies which version of the ONNX OperatorSet is
* being imported.
*
* All nodes in the ModelProto's graph will bind against the operator
* with the same-domain/same-op_type operator with the HIGHEST version
* in the referenced operator sets.
*
* The name of the framework or tool used to generate this model.
* This field SHOULD be present to indicate which implementation/tool/framework
* emitted the model.
*
*
* optional string producer_name = 2;
* @return Whether the producerName field is set.
*/
boolean hasProducerName();
/**
*
* The name of the framework or tool used to generate this model.
* This field SHOULD be present to indicate which implementation/tool/framework
* emitted the model.
*
* The name of the framework or tool used to generate this model.
* This field SHOULD be present to indicate which implementation/tool/framework
* emitted the model.
*
*
* optional string producer_name = 2;
* @return The bytes for producerName.
*/
com.google.protobuf.ByteString
getProducerNameBytes();
/**
*
* The version of the framework or tool used to generate this model.
* This field SHOULD be present to indicate which implementation/tool/framework
* emitted the model.
*
*
* optional string producer_version = 3;
* @return Whether the producerVersion field is set.
*/
boolean hasProducerVersion();
/**
*
* The version of the framework or tool used to generate this model.
* This field SHOULD be present to indicate which implementation/tool/framework
* emitted the model.
*
* The version of the framework or tool used to generate this model.
* This field SHOULD be present to indicate which implementation/tool/framework
* emitted the model.
*
*
* optional string producer_version = 3;
* @return The bytes for producerVersion.
*/
com.google.protobuf.ByteString
getProducerVersionBytes();
/**
*
* Domain name of the model.
* We use reverse domain names as name space indicators. For example:
* `com.facebook.fair` or `com.microsoft.cognitiveservices`
*
* Together with `model_version` and GraphProto.name, this forms the unique identity of
* the graph.
*
*
* optional string domain = 4;
* @return Whether the domain field is set.
*/
boolean hasDomain();
/**
*
* Domain name of the model.
* We use reverse domain names as name space indicators. For example:
* `com.facebook.fair` or `com.microsoft.cognitiveservices`
*
* Together with `model_version` and GraphProto.name, this forms the unique identity of
* the graph.
*
* Domain name of the model.
* We use reverse domain names as name space indicators. For example:
* `com.facebook.fair` or `com.microsoft.cognitiveservices`
*
* Together with `model_version` and GraphProto.name, this forms the unique identity of
* the graph.
*
*
* optional string domain = 4;
* @return The bytes for domain.
*/
com.google.protobuf.ByteString
getDomainBytes();
/**
*
* The version of the graph encoded. See Version enum below.
*
*
* optional int64 model_version = 5;
* @return Whether the modelVersion field is set.
*/
boolean hasModelVersion();
/**
*
* The version of the graph encoded. See Version enum below.
*
*
* optional int64 model_version = 5;
* @return The modelVersion.
*/
long getModelVersion();
/**
*
* A human-readable documentation for this model. Markdown is allowed.
*
*
* optional string doc_string = 6;
* @return Whether the docString field is set.
*/
boolean hasDocString();
/**
*
* A human-readable documentation for this model. Markdown is allowed.
*
* Training-specific information. Sequentially executing all stored
* `TrainingInfoProto.algorithm`s and assigning their outputs following
* the corresponding `TrainingInfoProto.update_binding`s is one training
* iteration. Similarly, to initialize the model
* (as if training hasn't happened), the user should sequentially execute
* all stored `TrainingInfoProto.initialization`s and assigns their outputs
* using `TrainingInfoProto.initialization_binding`s.
*
* If this field is empty, the training behavior of the model is undefined.
*
* Training-specific information. Sequentially executing all stored
* `TrainingInfoProto.algorithm`s and assigning their outputs following
* the corresponding `TrainingInfoProto.update_binding`s is one training
* iteration. Similarly, to initialize the model
* (as if training hasn't happened), the user should sequentially execute
* all stored `TrainingInfoProto.initialization`s and assigns their outputs
* using `TrainingInfoProto.initialization_binding`s.
*
* If this field is empty, the training behavior of the model is undefined.
*
* Training-specific information. Sequentially executing all stored
* `TrainingInfoProto.algorithm`s and assigning their outputs following
* the corresponding `TrainingInfoProto.update_binding`s is one training
* iteration. Similarly, to initialize the model
* (as if training hasn't happened), the user should sequentially execute
* all stored `TrainingInfoProto.initialization`s and assigns their outputs
* using `TrainingInfoProto.initialization_binding`s.
*
* If this field is empty, the training behavior of the model is undefined.
*
* Training-specific information. Sequentially executing all stored
* `TrainingInfoProto.algorithm`s and assigning their outputs following
* the corresponding `TrainingInfoProto.update_binding`s is one training
* iteration. Similarly, to initialize the model
* (as if training hasn't happened), the user should sequentially execute
* all stored `TrainingInfoProto.initialization`s and assigns their outputs
* using `TrainingInfoProto.initialization_binding`s.
*
* If this field is empty, the training behavior of the model is undefined.
*
* Training-specific information. Sequentially executing all stored
* `TrainingInfoProto.algorithm`s and assigning their outputs following
* the corresponding `TrainingInfoProto.update_binding`s is one training
* iteration. Similarly, to initialize the model
* (as if training hasn't happened), the user should sequentially execute
* all stored `TrainingInfoProto.initialization`s and assigns their outputs
* using `TrainingInfoProto.initialization_binding`s.
*
* If this field is empty, the training behavior of the model is undefined.
*
* A list of function protos local to the model.
*
* Name of the function "FunctionProto.name" should be unique within the domain "FunctionProto.domain".
* In case of any conflicts the behavior (whether the model local functions are given higher priority,
* or standard operator sets are given higher priotity or this is treated as error) is defined by
* the runtimes.
*
* The operator sets imported by FunctionProto should be compatible with the ones
* imported by ModelProto and other model local FunctionProtos.
* Example, if same operator set say 'A' is imported by a FunctionProto and ModelProto
* or by 2 FunctionProtos then versions for the operator set may be different but,
* the operator schema returned for op_type, domain, version combination
* for both the versions should be same for every node in the function body.
*
* One FunctionProto can reference other FunctionProto in the model, however, recursive reference
* is not allowed.
*
* A list of function protos local to the model.
*
* Name of the function "FunctionProto.name" should be unique within the domain "FunctionProto.domain".
* In case of any conflicts the behavior (whether the model local functions are given higher priority,
* or standard operator sets are given higher priotity or this is treated as error) is defined by
* the runtimes.
*
* The operator sets imported by FunctionProto should be compatible with the ones
* imported by ModelProto and other model local FunctionProtos.
* Example, if same operator set say 'A' is imported by a FunctionProto and ModelProto
* or by 2 FunctionProtos then versions for the operator set may be different but,
* the operator schema returned for op_type, domain, version combination
* for both the versions should be same for every node in the function body.
*
* One FunctionProto can reference other FunctionProto in the model, however, recursive reference
* is not allowed.
*
* A list of function protos local to the model.
*
* Name of the function "FunctionProto.name" should be unique within the domain "FunctionProto.domain".
* In case of any conflicts the behavior (whether the model local functions are given higher priority,
* or standard operator sets are given higher priotity or this is treated as error) is defined by
* the runtimes.
*
* The operator sets imported by FunctionProto should be compatible with the ones
* imported by ModelProto and other model local FunctionProtos.
* Example, if same operator set say 'A' is imported by a FunctionProto and ModelProto
* or by 2 FunctionProtos then versions for the operator set may be different but,
* the operator schema returned for op_type, domain, version combination
* for both the versions should be same for every node in the function body.
*
* One FunctionProto can reference other FunctionProto in the model, however, recursive reference
* is not allowed.
*
* A list of function protos local to the model.
*
* Name of the function "FunctionProto.name" should be unique within the domain "FunctionProto.domain".
* In case of any conflicts the behavior (whether the model local functions are given higher priority,
* or standard operator sets are given higher priotity or this is treated as error) is defined by
* the runtimes.
*
* The operator sets imported by FunctionProto should be compatible with the ones
* imported by ModelProto and other model local FunctionProtos.
* Example, if same operator set say 'A' is imported by a FunctionProto and ModelProto
* or by 2 FunctionProtos then versions for the operator set may be different but,
* the operator schema returned for op_type, domain, version combination
* for both the versions should be same for every node in the function body.
*
* One FunctionProto can reference other FunctionProto in the model, however, recursive reference
* is not allowed.
*
* A list of function protos local to the model.
*
* Name of the function "FunctionProto.name" should be unique within the domain "FunctionProto.domain".
* In case of any conflicts the behavior (whether the model local functions are given higher priority,
* or standard operator sets are given higher priotity or this is treated as error) is defined by
* the runtimes.
*
* The operator sets imported by FunctionProto should be compatible with the ones
* imported by ModelProto and other model local FunctionProtos.
* Example, if same operator set say 'A' is imported by a FunctionProto and ModelProto
* or by 2 FunctionProtos then versions for the operator set may be different but,
* the operator schema returned for op_type, domain, version combination
* for both the versions should be same for every node in the function body.
*
* One FunctionProto can reference other FunctionProto in the model, however, recursive reference
* is not allowed.
*
* Models
*
* ModelProto is a top-level file/container format for bundling a ML model and
* associating its computation graph with metadata.
*
* The semantics of the model are described by the associated GraphProto's.
*
*
* Protobuf type {@code onnx.ModelProto}
*/
public static final class ModelProto extends
com.google.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:onnx.ModelProto)
ModelProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use ModelProto.newBuilder() to construct.
// NOTE(review): restored the wildcard type argument on Builder; "Builder>"
// in the garbled copy is not valid Java (the "<?" was stripped in extraction).
private ModelProto(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
  super(builder);
}
// No-arg constructor used for the default instance: strings start empty and
// repeated fields start as immutable empty lists.
private ModelProto() {
opsetImport_ = java.util.Collections.emptyList();
producerName_ = "";
producerVersion_ = "";
domain_ = "";
docString_ = "";
metadataProps_ = java.util.Collections.emptyList();
trainingInfo_ = java.util.Collections.emptyList();
functions_ = java.util.Collections.emptyList();
}
// Used by the GeneratedMessageV3 machinery to allocate fresh instances
// reflectively; the parameter exists only to disambiguate the overload.
@java.lang.Override
@SuppressWarnings({"unused"})
protected java.lang.Object newInstance(
UnusedPrivateParameter unused) {
return new ModelProto();
}
/** Returns the protobuf descriptor for {@code onnx.ModelProto}. */
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return onnx.Onnx.internal_static_onnx_ModelProto_descriptor;
}
// Binds descriptor fields to the generated accessors for reflective access.
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return onnx.Onnx.internal_static_onnx_ModelProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
onnx.Onnx.ModelProto.class, onnx.Onnx.ModelProto.Builder.class);
}
// Presence bits for optional fields: 0x01 ir_version, 0x02 producer_name,
// 0x04 producer_version, 0x08 domain, 0x10 model_version, 0x20 doc_string
// (per the has* methods visible in this class).
private int bitField0_;
public static final int IR_VERSION_FIELD_NUMBER = 1;
private long irVersion_ = 0L;
/**
 * The version of the IR this model targets. See Version enum above.
 * This field MUST be present.
 *
 * <code>optional int64 ir_version = 1;</code>
 * @return Whether the irVersion field is set.
 */
@java.lang.Override
public boolean hasIrVersion() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
 * The version of the IR this model targets. See Version enum above.
 * This field MUST be present.
 *
 * <code>optional int64 ir_version = 1;</code>
 * @return The irVersion (0 when unset).
 */
@java.lang.Override
public long getIrVersion() {
return irVersion_;
}
public static final int OPSET_IMPORT_FIELD_NUMBER = 8;
@SuppressWarnings("serial")
// NOTE(review): restored the stripped element type on the list field, and the
// list accessors (getOpsetImportList / getOpsetImportOrBuilderList) whose
// declarations were lost where the javadoc appears triplicated in this copy.
private java.util.List<onnx.Onnx.OperatorSetIdProto> opsetImport_;
/**
 * The OperatorSets this model relies on. All ModelProtos MUST have at least
 * one entry specifying which version of the ONNX OperatorSet is imported.
 * All nodes in the graph bind against the same-domain/same-op_type operator
 * with the HIGHEST version in the referenced operator sets.
 *
 * <code>repeated .onnx.OperatorSetIdProto opset_import = 8;</code>
 */
@java.lang.Override
public java.util.List<onnx.Onnx.OperatorSetIdProto> getOpsetImportList() {
  return opsetImport_;
}
/** <code>repeated .onnx.OperatorSetIdProto opset_import = 8;</code> */
@java.lang.Override
public java.util.List<? extends onnx.Onnx.OperatorSetIdProtoOrBuilder>
    getOpsetImportOrBuilderList() {
  return opsetImport_;
}
/** <code>repeated .onnx.OperatorSetIdProto opset_import = 8;</code> */
@java.lang.Override
public int getOpsetImportCount() {
  return opsetImport_.size();
}
/**
 * The OperatorSets this model relies on (see field javadoc above).
 *
 * <code>repeated .onnx.OperatorSetIdProto opset_import = 8;</code>
 *
 * NOTE(review): getOpsetImport(int) was missing from this copy (its javadoc
 * appears doubled where the declaration was lost); restored per standard
 * protoc output.
 */
@java.lang.Override
public onnx.Onnx.OperatorSetIdProto getOpsetImport(int index) {
  return opsetImport_.get(index);
}
/** <code>repeated .onnx.OperatorSetIdProto opset_import = 8;</code> */
@java.lang.Override
public onnx.Onnx.OperatorSetIdProtoOrBuilder getOpsetImportOrBuilder(
    int index) {
  return opsetImport_.get(index);
}
public static final int PRODUCER_NAME_FIELD_NUMBER = 2;
@SuppressWarnings("serial")
// Holds either a String or a ByteString (lazily converted); see getter below.
private volatile java.lang.Object producerName_ = "";
/**
 * The name of the framework or tool used to generate this model.
 * This field SHOULD be present.
 *
 * <code>optional string producer_name = 2;</code>
 * @return Whether the producerName field is set.
 */
@java.lang.Override
public boolean hasProducerName() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
 * The name of the framework or tool used to generate this model.
 *
 * <code>optional string producer_name = 2;</code>
 * @return The producerName.
 *
 * NOTE(review): getProducerName() was missing from this copy (its javadoc is
 * doubled where the declaration was lost); restored per standard protoc output.
 */
@java.lang.Override
public java.lang.String getProducerName() {
  java.lang.Object ref = producerName_;
  if (ref instanceof java.lang.String) {
    return (java.lang.String) ref;
  } else {
    com.google.protobuf.ByteString bs =
        (com.google.protobuf.ByteString) ref;
    java.lang.String s = bs.toStringUtf8();
    if (bs.isValidUtf8()) {
      producerName_ = s;  // cache the decoded string
    }
    return s;
  }
}
/**
 * <code>optional string producer_name = 2;</code>
 * @return The bytes for producerName.
 */
@java.lang.Override
public com.google.protobuf.ByteString
    getProducerNameBytes() {
  java.lang.Object ref = producerName_;
  if (ref instanceof java.lang.String) {
    com.google.protobuf.ByteString b =
        com.google.protobuf.ByteString.copyFromUtf8(
            (java.lang.String) ref);
    producerName_ = b;
    return b;
  } else {
    return (com.google.protobuf.ByteString) ref;
  }
}
public static final int PRODUCER_VERSION_FIELD_NUMBER = 3;
@SuppressWarnings("serial")
// Holds either a String or a ByteString (lazily converted); see getter below.
private volatile java.lang.Object producerVersion_ = "";
/**
 * The version of the framework or tool used to generate this model.
 * This field SHOULD be present.
 *
 * <code>optional string producer_version = 3;</code>
 * @return Whether the producerVersion field is set.
 */
@java.lang.Override
public boolean hasProducerVersion() {
return ((bitField0_ & 0x00000004) != 0);
}
/**
 * The version of the framework or tool used to generate this model.
 *
 * <code>optional string producer_version = 3;</code>
 * @return The producerVersion.
 *
 * NOTE(review): getProducerVersion() was missing from this copy (doubled
 * javadoc where the declaration was lost); restored per standard protoc output.
 */
@java.lang.Override
public java.lang.String getProducerVersion() {
  java.lang.Object ref = producerVersion_;
  if (ref instanceof java.lang.String) {
    return (java.lang.String) ref;
  } else {
    com.google.protobuf.ByteString bs =
        (com.google.protobuf.ByteString) ref;
    java.lang.String s = bs.toStringUtf8();
    if (bs.isValidUtf8()) {
      producerVersion_ = s;  // cache the decoded string
    }
    return s;
  }
}
/**
 * <code>optional string producer_version = 3;</code>
 * @return The bytes for producerVersion.
 */
@java.lang.Override
public com.google.protobuf.ByteString
    getProducerVersionBytes() {
  java.lang.Object ref = producerVersion_;
  if (ref instanceof java.lang.String) {
    com.google.protobuf.ByteString b =
        com.google.protobuf.ByteString.copyFromUtf8(
            (java.lang.String) ref);
    producerVersion_ = b;
    return b;
  } else {
    return (com.google.protobuf.ByteString) ref;
  }
}
public static final int DOMAIN_FIELD_NUMBER = 4;
@SuppressWarnings("serial")
// Holds either a String or a ByteString (lazily converted); see getter below.
private volatile java.lang.Object domain_ = "";
/**
 * Domain name of the model, in reverse-domain-name form (for example
 * `com.facebook.fair` or `com.microsoft.cognitiveservices`). Together with
 * `model_version` and GraphProto.name this forms the graph's unique identity.
 *
 * <code>optional string domain = 4;</code>
 * @return Whether the domain field is set.
 */
@java.lang.Override
public boolean hasDomain() {
return ((bitField0_ & 0x00000008) != 0);
}
/**
 * Domain name of the model (reverse-domain-name form).
 *
 * <code>optional string domain = 4;</code>
 * @return The domain.
 *
 * NOTE(review): getDomain() was missing from this copy (doubled javadoc where
 * the declaration was lost); restored per standard protoc output.
 */
@java.lang.Override
public java.lang.String getDomain() {
  java.lang.Object ref = domain_;
  if (ref instanceof java.lang.String) {
    return (java.lang.String) ref;
  } else {
    com.google.protobuf.ByteString bs =
        (com.google.protobuf.ByteString) ref;
    java.lang.String s = bs.toStringUtf8();
    if (bs.isValidUtf8()) {
      domain_ = s;  // cache the decoded string
    }
    return s;
  }
}
/**
 * <code>optional string domain = 4;</code>
 * @return The bytes for domain.
 */
@java.lang.Override
public com.google.protobuf.ByteString
    getDomainBytes() {
  java.lang.Object ref = domain_;
  if (ref instanceof java.lang.String) {
    com.google.protobuf.ByteString b =
        com.google.protobuf.ByteString.copyFromUtf8(
            (java.lang.String) ref);
    domain_ = b;
    return b;
  } else {
    return (com.google.protobuf.ByteString) ref;
  }
}
public static final int MODEL_VERSION_FIELD_NUMBER = 5;
private long modelVersion_ = 0L;
/**
 * The version of the graph encoded. See Version enum below.
 *
 * <code>optional int64 model_version = 5;</code>
 * @return Whether the modelVersion field is set.
 */
@java.lang.Override
public boolean hasModelVersion() {
return ((bitField0_ & 0x00000010) != 0);
}
/**
 * The version of the graph encoded. See Version enum below.
 *
 * <code>optional int64 model_version = 5;</code>
 * @return The modelVersion (0 when unset).
 */
@java.lang.Override
public long getModelVersion() {
return modelVersion_;
}
public static final int DOC_STRING_FIELD_NUMBER = 6;
@SuppressWarnings("serial")
// Holds either a String or a ByteString (lazily converted).
private volatile java.lang.Object docString_ = "";
/**
 * A human-readable documentation for this model. Markdown is allowed.
 *
 * <code>optional string doc_string = 6;</code>
 * @return Whether the docString field is set.
 */
@java.lang.Override
public boolean hasDocString() {
return ((bitField0_ & 0x00000020) != 0);
}
/**
 * A human-readable documentation for this model. Markdown is allowed.
 *
 * <code>optional string doc_string = 6;</code>
 * @return The docString.
 *
 * NOTE(review): everything between hasDocString() and
 * getMetadataPropsOrBuilder(int) was missing from this copy — getDocString(),
 * getDocStringBytes(), the "graph" field (7) with its accessors, and the
 * "metadata_props" field declaration (which the constructor and the visible
 * accessor below both require) with its remaining accessors. Reconstructed
 * per standard protoc output; verify against a freshly generated Onnx.java.
 */
@java.lang.Override
public java.lang.String getDocString() {
  java.lang.Object ref = docString_;
  if (ref instanceof java.lang.String) {
    return (java.lang.String) ref;
  } else {
    com.google.protobuf.ByteString bs =
        (com.google.protobuf.ByteString) ref;
    java.lang.String s = bs.toStringUtf8();
    if (bs.isValidUtf8()) {
      docString_ = s;  // cache the decoded string
    }
    return s;
  }
}
/**
 * <code>optional string doc_string = 6;</code>
 * @return The bytes for docString.
 */
@java.lang.Override
public com.google.protobuf.ByteString
    getDocStringBytes() {
  java.lang.Object ref = docString_;
  if (ref instanceof java.lang.String) {
    com.google.protobuf.ByteString b =
        com.google.protobuf.ByteString.copyFromUtf8(
            (java.lang.String) ref);
    docString_ = b;
    return b;
  } else {
    return (com.google.protobuf.ByteString) ref;
  }
}
public static final int GRAPH_FIELD_NUMBER = 7;
private onnx.Onnx.GraphProto graph_;
/**
 * The parameterized graph that is evaluated to execute the model.
 *
 * <code>optional .onnx.GraphProto graph = 7;</code>
 * @return Whether the graph field is set.
 */
@java.lang.Override
public boolean hasGraph() {
  return ((bitField0_ & 0x00000040) != 0);
}
/**
 * <code>optional .onnx.GraphProto graph = 7;</code>
 * @return The graph (default instance when unset).
 */
@java.lang.Override
public onnx.Onnx.GraphProto getGraph() {
  return graph_ == null ? onnx.Onnx.GraphProto.getDefaultInstance() : graph_;
}
/** <code>optional .onnx.GraphProto graph = 7;</code> */
@java.lang.Override
public onnx.Onnx.GraphProtoOrBuilder getGraphOrBuilder() {
  return graph_ == null ? onnx.Onnx.GraphProto.getDefaultInstance() : graph_;
}
public static final int METADATA_PROPS_FIELD_NUMBER = 14;
@SuppressWarnings("serial")
private java.util.List<onnx.Onnx.StringStringEntryProto> metadataProps_;
/**
 * Named metadata values; keys should be distinct.
 *
 * <code>repeated .onnx.StringStringEntryProto metadata_props = 14;</code>
 */
@java.lang.Override
public java.util.List<onnx.Onnx.StringStringEntryProto> getMetadataPropsList() {
  return metadataProps_;
}
/** <code>repeated .onnx.StringStringEntryProto metadata_props = 14;</code> */
@java.lang.Override
public java.util.List<? extends onnx.Onnx.StringStringEntryProtoOrBuilder>
    getMetadataPropsOrBuilderList() {
  return metadataProps_;
}
/** <code>repeated .onnx.StringStringEntryProto metadata_props = 14;</code> */
@java.lang.Override
public int getMetadataPropsCount() {
  return metadataProps_.size();
}
/** <code>repeated .onnx.StringStringEntryProto metadata_props = 14;</code> */
@java.lang.Override
public onnx.Onnx.StringStringEntryProto getMetadataProps(int index) {
  return metadataProps_.get(index);
}
/** <code>repeated .onnx.StringStringEntryProto metadata_props = 14;</code> */
@java.lang.Override
public onnx.Onnx.StringStringEntryProtoOrBuilder getMetadataPropsOrBuilder(
    int index) {
  return metadataProps_.get(index);
}
public static final int TRAINING_INFO_FIELD_NUMBER = 20;
@SuppressWarnings("serial")
// NOTE(review): restored the stripped element type on the list field, and the
// list accessors (getTrainingInfoList / getTrainingInfoOrBuilderList) whose
// declarations were lost where the javadoc appears triplicated in this copy.
private java.util.List<onnx.Onnx.TrainingInfoProto> trainingInfo_;
/**
 * Training-specific information. Sequentially executing all stored
 * `TrainingInfoProto.algorithm`s and assigning their outputs following the
 * corresponding `TrainingInfoProto.update_binding`s is one training
 * iteration. Similarly, to initialize the model (as if training hasn't
 * happened), sequentially execute all stored
 * `TrainingInfoProto.initialization`s and assign their outputs using
 * `TrainingInfoProto.initialization_binding`s.
 * If this field is empty, the training behavior of the model is undefined.
 *
 * <code>repeated .onnx.TrainingInfoProto training_info = 20;</code>
 */
@java.lang.Override
public java.util.List<onnx.Onnx.TrainingInfoProto> getTrainingInfoList() {
  return trainingInfo_;
}
/** <code>repeated .onnx.TrainingInfoProto training_info = 20;</code> */
@java.lang.Override
public java.util.List<? extends onnx.Onnx.TrainingInfoProtoOrBuilder>
    getTrainingInfoOrBuilderList() {
  return trainingInfo_;
}
/** <code>repeated .onnx.TrainingInfoProto training_info = 20;</code> */
@java.lang.Override
public int getTrainingInfoCount() {
  return trainingInfo_.size();
}
/**
 * Training-specific information (see field javadoc above).
 *
 * <code>repeated .onnx.TrainingInfoProto training_info = 20;</code>
 *
 * NOTE(review): getTrainingInfo(int) was missing from this copy (doubled
 * javadoc where the declaration was lost); restored per standard protoc output.
 */
@java.lang.Override
public onnx.Onnx.TrainingInfoProto getTrainingInfo(int index) {
  return trainingInfo_.get(index);
}
/** <code>repeated .onnx.TrainingInfoProto training_info = 20;</code> */
@java.lang.Override
public onnx.Onnx.TrainingInfoProtoOrBuilder getTrainingInfoOrBuilder(
    int index) {
  return trainingInfo_.get(index);
}
public static final int FUNCTIONS_FIELD_NUMBER = 25;
@SuppressWarnings("serial")
// NOTE(review): restored the stripped element type on the list field, and the
// list accessors (getFunctionsList / getFunctionsOrBuilderList) whose
// declarations were lost where the javadoc appears triplicated in this copy.
private java.util.List<onnx.Onnx.FunctionProto> functions_;
/**
 * A list of function protos local to the model.
 * "FunctionProto.name" should be unique within "FunctionProto.domain"; how a
 * conflict is resolved (model-local functions vs. standard operator sets, or
 * an error) is defined by the runtimes. Operator sets imported by a
 * FunctionProto must be compatible with those imported by the ModelProto and
 * other local FunctionProtos: for a given op_type/domain/version combination
 * the resolved operator schema must be the same for every node in the
 * function body. Functions may reference each other, but recursive
 * references are not allowed.
 *
 * <code>repeated .onnx.FunctionProto functions = 25;</code>
 */
@java.lang.Override
public java.util.List<onnx.Onnx.FunctionProto> getFunctionsList() {
  return functions_;
}
/** <code>repeated .onnx.FunctionProto functions = 25;</code> */
@java.lang.Override
public java.util.List<? extends onnx.Onnx.FunctionProtoOrBuilder>
    getFunctionsOrBuilderList() {
  return functions_;
}
/** <code>repeated .onnx.FunctionProto functions = 25;</code> */
@java.lang.Override
public int getFunctionsCount() {
  return functions_.size();
}
/**
*
* A list of function protos local to the model.
*
* Name of the function "FunctionProto.name" should be unique within the domain "FunctionProto.domain".
* In case of any conflicts the behavior (whether the model local functions are given higher priority,
* or standard operator sets are given higher priority or this is treated as error) is defined by
* the runtimes.
*
* The operator sets imported by FunctionProto should be compatible with the ones
* imported by ModelProto and other model local FunctionProtos.
* Example, if same operator set say 'A' is imported by a FunctionProto and ModelProto
* or by 2 FunctionProtos then versions for the operator set may be different but,
* the operator schema returned for op_type, domain, version combination
* for both the versions should be same for every node in the function body.
*
* One FunctionProto can reference other FunctionProto in the model, however, recursive reference
* is not allowed.
*
* A list of function protos local to the model.
*
* Name of the function "FunctionProto.name" should be unique within the domain "FunctionProto.domain".
* In case of any conflicts the behavior (whether the model local functions are given higher priority,
* or standard operator sets are given higher priority or this is treated as error) is defined by
* the runtimes.
*
* The operator sets imported by FunctionProto should be compatible with the ones
* imported by ModelProto and other model local FunctionProtos.
* Example, if same operator set say 'A' is imported by a FunctionProto and ModelProto
* or by 2 FunctionProtos then versions for the operator set may be different but,
* the operator schema returned for op_type, domain, version combination
* for both the versions should be same for every node in the function body.
*
* One FunctionProto can reference other FunctionProto in the model, however, recursive reference
* is not allowed.
*
*
* repeated .onnx.FunctionProto functions = 25;
*/
// Read-only view of the functions entry at the given index.
// On the immutable message this is always the message itself, never a Builder.
@java.lang.Override
public onnx.Onnx.FunctionProtoOrBuilder getFunctionsOrBuilder(
int index) {
return functions_.get(index);
}
// Memoized result of isInitialized(): -1 = not yet computed, 0 = false, 1 = true.
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
// ModelProto declares no required fields, so every instance is initialized.
memoizedIsInitialized = 1;
return true;
}
// Serializes this message to the wire. Optional scalar/message fields are
// written only when their presence bit in bitField0_ is set; repeated fields
// are written element by element with their field numbers (8, 14, 20, 25).
@java.lang.Override
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (((bitField0_ & 0x00000001) != 0)) {
output.writeInt64(1, irVersion_);
}
if (((bitField0_ & 0x00000002) != 0)) {
com.google.protobuf.GeneratedMessageV3.writeString(output, 2, producerName_);
}
if (((bitField0_ & 0x00000004) != 0)) {
com.google.protobuf.GeneratedMessageV3.writeString(output, 3, producerVersion_);
}
if (((bitField0_ & 0x00000008) != 0)) {
com.google.protobuf.GeneratedMessageV3.writeString(output, 4, domain_);
}
if (((bitField0_ & 0x00000010) != 0)) {
output.writeInt64(5, modelVersion_);
}
if (((bitField0_ & 0x00000020) != 0)) {
com.google.protobuf.GeneratedMessageV3.writeString(output, 6, docString_);
}
if (((bitField0_ & 0x00000040) != 0)) {
output.writeMessage(7, getGraph());
}
for (int i = 0; i < opsetImport_.size(); i++) {
output.writeMessage(8, opsetImport_.get(i));
}
for (int i = 0; i < metadataProps_.size(); i++) {
output.writeMessage(14, metadataProps_.get(i));
}
for (int i = 0; i < trainingInfo_.size(); i++) {
output.writeMessage(20, trainingInfo_.get(i));
}
for (int i = 0; i < functions_.size(); i++) {
output.writeMessage(25, functions_.get(i));
}
// Preserve any fields that were unknown when this message was parsed.
getUnknownFields().writeTo(output);
}
// Computes (and memoizes in memoizedSize) the exact number of bytes writeTo
// will produce. Mirrors writeTo field-for-field so the two stay in sync.
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) != 0)) {
size += com.google.protobuf.CodedOutputStream
.computeInt64Size(1, irVersion_);
}
if (((bitField0_ & 0x00000002) != 0)) {
size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, producerName_);
}
if (((bitField0_ & 0x00000004) != 0)) {
size += com.google.protobuf.GeneratedMessageV3.computeStringSize(3, producerVersion_);
}
if (((bitField0_ & 0x00000008) != 0)) {
size += com.google.protobuf.GeneratedMessageV3.computeStringSize(4, domain_);
}
if (((bitField0_ & 0x00000010) != 0)) {
size += com.google.protobuf.CodedOutputStream
.computeInt64Size(5, modelVersion_);
}
if (((bitField0_ & 0x00000020) != 0)) {
size += com.google.protobuf.GeneratedMessageV3.computeStringSize(6, docString_);
}
if (((bitField0_ & 0x00000040) != 0)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(7, getGraph());
}
for (int i = 0; i < opsetImport_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(8, opsetImport_.get(i));
}
for (int i = 0; i < metadataProps_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(14, metadataProps_.get(i));
}
for (int i = 0; i < trainingInfo_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(20, trainingInfo_.get(i));
}
for (int i = 0; i < functions_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(25, functions_.get(i));
}
size += getUnknownFields().getSerializedSize();
memoizedSize = size;
return size;
}
// Field-by-field structural equality. Optional fields must agree on presence
// before their values are compared; unknown fields participate as well.
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof onnx.Onnx.ModelProto)) {
return super.equals(obj);
}
onnx.Onnx.ModelProto other = (onnx.Onnx.ModelProto) obj;
if (hasIrVersion() != other.hasIrVersion()) return false;
if (hasIrVersion()) {
if (getIrVersion()
!= other.getIrVersion()) return false;
}
if (!getOpsetImportList()
.equals(other.getOpsetImportList())) return false;
if (hasProducerName() != other.hasProducerName()) return false;
if (hasProducerName()) {
if (!getProducerName()
.equals(other.getProducerName())) return false;
}
if (hasProducerVersion() != other.hasProducerVersion()) return false;
if (hasProducerVersion()) {
if (!getProducerVersion()
.equals(other.getProducerVersion())) return false;
}
if (hasDomain() != other.hasDomain()) return false;
if (hasDomain()) {
if (!getDomain()
.equals(other.getDomain())) return false;
}
if (hasModelVersion() != other.hasModelVersion()) return false;
if (hasModelVersion()) {
if (getModelVersion()
!= other.getModelVersion()) return false;
}
if (hasDocString() != other.hasDocString()) return false;
if (hasDocString()) {
if (!getDocString()
.equals(other.getDocString())) return false;
}
if (hasGraph() != other.hasGraph()) return false;
if (hasGraph()) {
if (!getGraph()
.equals(other.getGraph())) return false;
}
if (!getMetadataPropsList()
.equals(other.getMetadataPropsList())) return false;
if (!getTrainingInfoList()
.equals(other.getTrainingInfoList())) return false;
if (!getFunctionsList()
.equals(other.getFunctionsList())) return false;
if (!getUnknownFields().equals(other.getUnknownFields())) return false;
return true;
}
// Hash consistent with equals(): only set/non-empty fields contribute,
// each mixed in with its field number using the generated 37/53 prime scheme.
// The result is memoized (0 is used as the "not computed" sentinel).
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (hasIrVersion()) {
hash = (37 * hash) + IR_VERSION_FIELD_NUMBER;
hash = (53 * hash) + com.google.protobuf.Internal.hashLong(
getIrVersion());
}
if (getOpsetImportCount() > 0) {
hash = (37 * hash) + OPSET_IMPORT_FIELD_NUMBER;
hash = (53 * hash) + getOpsetImportList().hashCode();
}
if (hasProducerName()) {
hash = (37 * hash) + PRODUCER_NAME_FIELD_NUMBER;
hash = (53 * hash) + getProducerName().hashCode();
}
if (hasProducerVersion()) {
hash = (37 * hash) + PRODUCER_VERSION_FIELD_NUMBER;
hash = (53 * hash) + getProducerVersion().hashCode();
}
if (hasDomain()) {
hash = (37 * hash) + DOMAIN_FIELD_NUMBER;
hash = (53 * hash) + getDomain().hashCode();
}
if (hasModelVersion()) {
hash = (37 * hash) + MODEL_VERSION_FIELD_NUMBER;
hash = (53 * hash) + com.google.protobuf.Internal.hashLong(
getModelVersion());
}
if (hasDocString()) {
hash = (37 * hash) + DOC_STRING_FIELD_NUMBER;
hash = (53 * hash) + getDocString().hashCode();
}
if (hasGraph()) {
hash = (37 * hash) + GRAPH_FIELD_NUMBER;
hash = (53 * hash) + getGraph().hashCode();
}
if (getMetadataPropsCount() > 0) {
hash = (37 * hash) + METADATA_PROPS_FIELD_NUMBER;
hash = (53 * hash) + getMetadataPropsList().hashCode();
}
if (getTrainingInfoCount() > 0) {
hash = (37 * hash) + TRAINING_INFO_FIELD_NUMBER;
hash = (53 * hash) + getTrainingInfoList().hashCode();
}
if (getFunctionsCount() > 0) {
hash = (37 * hash) + FUNCTIONS_FIELD_NUMBER;
hash = (53 * hash) + getFunctionsList().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
// Standard generated parse entry points. All overloads delegate to PARSER;
// the InputStream variants funnel IOExceptions through
// GeneratedMessageV3.parseWithIOException, and the "Delimited" variants read
// a varint length prefix before the message bytes.
public static onnx.Onnx.ModelProto parseFrom(
java.nio.ByteBuffer data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static onnx.Onnx.ModelProto parseFrom(
java.nio.ByteBuffer data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static onnx.Onnx.ModelProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static onnx.Onnx.ModelProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static onnx.Onnx.ModelProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static onnx.Onnx.ModelProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static onnx.Onnx.ModelProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static onnx.Onnx.ModelProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static onnx.Onnx.ModelProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static onnx.Onnx.ModelProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static onnx.Onnx.ModelProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static onnx.Onnx.ModelProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
// Builder factories. toBuilder() avoids a needless copy when invoked on the
// shared default instance by returning a fresh empty Builder instead.
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(onnx.Onnx.ModelProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
*
* Models
*
* ModelProto is a top-level file/container format for bundling a ML model and
* associating its computation graph with metadata.
*
* The semantics of the model are described by the associated GraphProto's.
*
*
* Protobuf type {@code onnx.ModelProto}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessageV3.Builder implements
// @@protoc_insertion_point(builder_implements:onnx.ModelProto)
onnx.Onnx.ModelProtoOrBuilder {
// Descriptor plumbing: ties this Builder to the onnx.ModelProto schema and
// its reflection-based field accessor table.
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return onnx.Onnx.internal_static_onnx_ModelProto_descriptor;
}
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return onnx.Onnx.internal_static_onnx_ModelProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
onnx.Onnx.ModelProto.class, onnx.Onnx.ModelProto.Builder.class);
}
// Construct using onnx.Onnx.ModelProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
// Eagerly creates the nested field builders when the runtime is configured
// with alwaysUseFieldBuilders (used for nested-builder support/testing).
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
getOpsetImportFieldBuilder();
getGraphFieldBuilder();
getMetadataPropsFieldBuilder();
getTrainingInfoFieldBuilder();
getFunctionsFieldBuilder();
}
}
// Resets every field to its default. For each repeated field, either the
// backing list is replaced with an empty one (no field builder yet) or the
// field builder is cleared; the corresponding "is mutable" bit in
// bitField0_ is dropped in both cases.
@java.lang.Override
public Builder clear() {
super.clear();
bitField0_ = 0;
irVersion_ = 0L;
if (opsetImportBuilder_ == null) {
opsetImport_ = java.util.Collections.emptyList();
} else {
opsetImport_ = null;
opsetImportBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000002);
producerName_ = "";
producerVersion_ = "";
domain_ = "";
modelVersion_ = 0L;
docString_ = "";
graph_ = null;
if (graphBuilder_ != null) {
graphBuilder_.dispose();
graphBuilder_ = null;
}
if (metadataPropsBuilder_ == null) {
metadataProps_ = java.util.Collections.emptyList();
} else {
metadataProps_ = null;
metadataPropsBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000100);
if (trainingInfoBuilder_ == null) {
trainingInfo_ = java.util.Collections.emptyList();
} else {
trainingInfo_ = null;
trainingInfoBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000200);
if (functionsBuilder_ == null) {
functions_ = java.util.Collections.emptyList();
} else {
functions_ = null;
functionsBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000400);
return this;
}
@java.lang.Override
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return onnx.Onnx.internal_static_onnx_ModelProto_descriptor;
}
@java.lang.Override
public onnx.Onnx.ModelProto getDefaultInstanceForType() {
return onnx.Onnx.ModelProto.getDefaultInstance();
}
// Builds the message, throwing if any required field is unset (ModelProto
// has none, so isInitialized() is always true here).
@java.lang.Override
public onnx.Onnx.ModelProto build() {
onnx.Onnx.ModelProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
// Builds without the required-field check, copying repeated fields first and
// then the scalar/singular fields.
@java.lang.Override
public onnx.Onnx.ModelProto buildPartial() {
onnx.Onnx.ModelProto result = new onnx.Onnx.ModelProto(this);
buildPartialRepeatedFields(result);
if (bitField0_ != 0) { buildPartial0(result); }
onBuilt();
return result;
}
// Transfers the four repeated fields into the result. When no field builder
// exists the builder's list is frozen (made unmodifiable) and shared with
// the message; otherwise the field builder produces the final list.
private void buildPartialRepeatedFields(onnx.Onnx.ModelProto result) {
if (opsetImportBuilder_ == null) {
if (((bitField0_ & 0x00000002) != 0)) {
opsetImport_ = java.util.Collections.unmodifiableList(opsetImport_);
bitField0_ = (bitField0_ & ~0x00000002);
}
result.opsetImport_ = opsetImport_;
} else {
result.opsetImport_ = opsetImportBuilder_.build();
}
if (metadataPropsBuilder_ == null) {
if (((bitField0_ & 0x00000100) != 0)) {
metadataProps_ = java.util.Collections.unmodifiableList(metadataProps_);
bitField0_ = (bitField0_ & ~0x00000100);
}
result.metadataProps_ = metadataProps_;
} else {
result.metadataProps_ = metadataPropsBuilder_.build();
}
if (trainingInfoBuilder_ == null) {
if (((bitField0_ & 0x00000200) != 0)) {
trainingInfo_ = java.util.Collections.unmodifiableList(trainingInfo_);
bitField0_ = (bitField0_ & ~0x00000200);
}
result.trainingInfo_ = trainingInfo_;
} else {
result.trainingInfo_ = trainingInfoBuilder_.build();
}
if (functionsBuilder_ == null) {
if (((bitField0_ & 0x00000400) != 0)) {
functions_ = java.util.Collections.unmodifiableList(functions_);
bitField0_ = (bitField0_ & ~0x00000400);
}
result.functions_ = functions_;
} else {
result.functions_ = functionsBuilder_.build();
}
}
// Copies the singular fields. Note the builder's bit layout differs from the
// message's (the builder reserves bit 0x2 for opset_import mutability), so
// each builder bit is remapped to the message's presence bit.
private void buildPartial0(onnx.Onnx.ModelProto result) {
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) != 0)) {
result.irVersion_ = irVersion_;
to_bitField0_ |= 0x00000001;
}
if (((from_bitField0_ & 0x00000004) != 0)) {
result.producerName_ = producerName_;
to_bitField0_ |= 0x00000002;
}
if (((from_bitField0_ & 0x00000008) != 0)) {
result.producerVersion_ = producerVersion_;
to_bitField0_ |= 0x00000004;
}
if (((from_bitField0_ & 0x00000010) != 0)) {
result.domain_ = domain_;
to_bitField0_ |= 0x00000008;
}
if (((from_bitField0_ & 0x00000020) != 0)) {
result.modelVersion_ = modelVersion_;
to_bitField0_ |= 0x00000010;
}
if (((from_bitField0_ & 0x00000040) != 0)) {
result.docString_ = docString_;
to_bitField0_ |= 0x00000020;
}
if (((from_bitField0_ & 0x00000080) != 0)) {
result.graph_ = graphBuilder_ == null
? graph_
: graphBuilder_.build();
to_bitField0_ |= 0x00000040;
}
result.bitField0_ |= to_bitField0_;
}
// Reflection-based mutators: pure pass-throughs to the GeneratedMessageV3
// base class, kept only to narrow the return type to this Builder.
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
com.google.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
com.google.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
com.google.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
com.google.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
com.google.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
// Dispatches to the typed merge when possible; otherwise falls back to the
// reflective field-by-field merge in the base class.
@java.lang.Override
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof onnx.Onnx.ModelProto) {
return mergeFrom((onnx.Onnx.ModelProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
// Typed merge: set singular fields from `other` when present there, and
// append `other`'s repeated elements to ours. For each repeated field, if
// our list is still empty we share `other`'s (immutable) list directly
// instead of copying; the mutability bit is cleared because the shared list
// must not be modified in place.
public Builder mergeFrom(onnx.Onnx.ModelProto other) {
if (other == onnx.Onnx.ModelProto.getDefaultInstance()) return this;
if (other.hasIrVersion()) {
setIrVersion(other.getIrVersion());
}
if (opsetImportBuilder_ == null) {
if (!other.opsetImport_.isEmpty()) {
if (opsetImport_.isEmpty()) {
opsetImport_ = other.opsetImport_;
bitField0_ = (bitField0_ & ~0x00000002);
} else {
ensureOpsetImportIsMutable();
opsetImport_.addAll(other.opsetImport_);
}
onChanged();
}
} else {
if (!other.opsetImport_.isEmpty()) {
if (opsetImportBuilder_.isEmpty()) {
// Field builder exists but is empty: drop it, adopt other's list,
// and (optionally) recreate the builder around the shared list.
opsetImportBuilder_.dispose();
opsetImportBuilder_ = null;
opsetImport_ = other.opsetImport_;
bitField0_ = (bitField0_ & ~0x00000002);
opsetImportBuilder_ =
com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
getOpsetImportFieldBuilder() : null;
} else {
opsetImportBuilder_.addAllMessages(other.opsetImport_);
}
}
}
if (other.hasProducerName()) {
producerName_ = other.producerName_;
bitField0_ |= 0x00000004;
onChanged();
}
if (other.hasProducerVersion()) {
producerVersion_ = other.producerVersion_;
bitField0_ |= 0x00000008;
onChanged();
}
if (other.hasDomain()) {
domain_ = other.domain_;
bitField0_ |= 0x00000010;
onChanged();
}
if (other.hasModelVersion()) {
setModelVersion(other.getModelVersion());
}
if (other.hasDocString()) {
docString_ = other.docString_;
bitField0_ |= 0x00000040;
onChanged();
}
if (other.hasGraph()) {
// Singular message fields merge recursively rather than being replaced.
mergeGraph(other.getGraph());
}
if (metadataPropsBuilder_ == null) {
if (!other.metadataProps_.isEmpty()) {
if (metadataProps_.isEmpty()) {
metadataProps_ = other.metadataProps_;
bitField0_ = (bitField0_ & ~0x00000100);
} else {
ensureMetadataPropsIsMutable();
metadataProps_.addAll(other.metadataProps_);
}
onChanged();
}
} else {
if (!other.metadataProps_.isEmpty()) {
if (metadataPropsBuilder_.isEmpty()) {
metadataPropsBuilder_.dispose();
metadataPropsBuilder_ = null;
metadataProps_ = other.metadataProps_;
bitField0_ = (bitField0_ & ~0x00000100);
metadataPropsBuilder_ =
com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
getMetadataPropsFieldBuilder() : null;
} else {
metadataPropsBuilder_.addAllMessages(other.metadataProps_);
}
}
}
if (trainingInfoBuilder_ == null) {
if (!other.trainingInfo_.isEmpty()) {
if (trainingInfo_.isEmpty()) {
trainingInfo_ = other.trainingInfo_;
bitField0_ = (bitField0_ & ~0x00000200);
} else {
ensureTrainingInfoIsMutable();
trainingInfo_.addAll(other.trainingInfo_);
}
onChanged();
}
} else {
if (!other.trainingInfo_.isEmpty()) {
if (trainingInfoBuilder_.isEmpty()) {
trainingInfoBuilder_.dispose();
trainingInfoBuilder_ = null;
trainingInfo_ = other.trainingInfo_;
bitField0_ = (bitField0_ & ~0x00000200);
trainingInfoBuilder_ =
com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
getTrainingInfoFieldBuilder() : null;
} else {
trainingInfoBuilder_.addAllMessages(other.trainingInfo_);
}
}
}
if (functionsBuilder_ == null) {
if (!other.functions_.isEmpty()) {
if (functions_.isEmpty()) {
functions_ = other.functions_;
bitField0_ = (bitField0_ & ~0x00000400);
} else {
ensureFunctionsIsMutable();
functions_.addAll(other.functions_);
}
onChanged();
}
} else {
if (!other.functions_.isEmpty()) {
if (functionsBuilder_.isEmpty()) {
functionsBuilder_.dispose();
functionsBuilder_ = null;
functions_ = other.functions_;
bitField0_ = (bitField0_ & ~0x00000400);
functionsBuilder_ =
com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
getFunctionsFieldBuilder() : null;
} else {
functionsBuilder_.addAllMessages(other.functions_);
}
}
}
this.mergeUnknownFields(other.getUnknownFields());
onChanged();
return this;
}
// ModelProto has no required fields, so a Builder is always initialized.
@java.lang.Override
public final boolean isInitialized() {
return true;
}
// Wire-format parser: reads tag/value pairs until EOF (tag 0) or an
// end-group tag. Case labels are the raw tags (field_number << 3 | wire_type),
// e.g. 58 = field 7 length-delimited (graph), 202 = field 25 (functions).
// Unrecognized tags are preserved via parseUnknownField.
@java.lang.Override
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 8: {
irVersion_ = input.readInt64();
bitField0_ |= 0x00000001;
break;
} // case 8
case 18: {
producerName_ = input.readBytes();
bitField0_ |= 0x00000004;
break;
} // case 18
case 26: {
producerVersion_ = input.readBytes();
bitField0_ |= 0x00000008;
break;
} // case 26
case 34: {
domain_ = input.readBytes();
bitField0_ |= 0x00000010;
break;
} // case 34
case 40: {
modelVersion_ = input.readInt64();
bitField0_ |= 0x00000020;
break;
} // case 40
case 50: {
docString_ = input.readBytes();
bitField0_ |= 0x00000040;
break;
} // case 50
case 58: {
input.readMessage(
getGraphFieldBuilder().getBuilder(),
extensionRegistry);
bitField0_ |= 0x00000080;
break;
} // case 58
case 66: {
onnx.Onnx.OperatorSetIdProto m =
input.readMessage(
onnx.Onnx.OperatorSetIdProto.PARSER,
extensionRegistry);
if (opsetImportBuilder_ == null) {
ensureOpsetImportIsMutable();
opsetImport_.add(m);
} else {
opsetImportBuilder_.addMessage(m);
}
break;
} // case 66
case 114: {
onnx.Onnx.StringStringEntryProto m =
input.readMessage(
onnx.Onnx.StringStringEntryProto.PARSER,
extensionRegistry);
if (metadataPropsBuilder_ == null) {
ensureMetadataPropsIsMutable();
metadataProps_.add(m);
} else {
metadataPropsBuilder_.addMessage(m);
}
break;
} // case 114
case 162: {
onnx.Onnx.TrainingInfoProto m =
input.readMessage(
onnx.Onnx.TrainingInfoProto.PARSER,
extensionRegistry);
if (trainingInfoBuilder_ == null) {
ensureTrainingInfoIsMutable();
trainingInfo_.add(m);
} else {
trainingInfoBuilder_.addMessage(m);
}
break;
} // case 162
case 202: {
onnx.Onnx.FunctionProto m =
input.readMessage(
onnx.Onnx.FunctionProto.PARSER,
extensionRegistry);
if (functionsBuilder_ == null) {
ensureFunctionsIsMutable();
functions_.add(m);
} else {
functionsBuilder_.addMessage(m);
}
break;
} // case 202
default: {
if (!super.parseUnknownField(input, extensionRegistry, tag)) {
done = true; // was an endgroup tag
}
break;
} // default:
} // switch (tag)
} // while (!done)
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.unwrapIOException();
} finally {
onChanged();
} // finally
return this;
}
// Presence/mutability bits for the Builder's fields (layout differs from the
// message's bitField0_; see buildPartial0 for the mapping).
private int bitField0_;
private long irVersion_ ;
/**
*
* The version of the IR this model targets. See Version enum above.
* This field MUST be present.
*
*
* optional int64 ir_version = 1;
* @return Whether the irVersion field is set.
*/
@java.lang.Override
public boolean hasIrVersion() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
*
* The version of the IR this model targets. See Version enum above.
* This field MUST be present.
*
*
* optional int64 ir_version = 1;
* @return The irVersion.
*/
@java.lang.Override
public long getIrVersion() {
return irVersion_;
}
/**
*
* The version of the IR this model targets. See Version enum above.
* This field MUST be present.
*
*
* optional int64 ir_version = 1;
* @param value The irVersion to set.
* @return This builder for chaining.
*/
public Builder setIrVersion(long value) {
irVersion_ = value;
bitField0_ |= 0x00000001;
onChanged();
return this;
}
/**
*
* The version of the IR this model targets. See Version enum above.
* This field MUST be present.
*
* The OperatorSets this model relies on.
* All ModelProtos MUST have at least one entry that
* specifies which version of the ONNX OperatorSet is
* being imported.
*
* All nodes in the ModelProto's graph will bind against the operator
* with the same-domain/same-op_type operator with the HIGHEST version
* in the referenced operator sets.
*
* The OperatorSets this model relies on.
* All ModelProtos MUST have at least one entry that
* specifies which version of the ONNX OperatorSet is
* being imported.
*
* All nodes in the ModelProto's graph will bind against the operator
* with the same-domain/same-op_type operator with the HIGHEST version
* in the referenced operator sets.
*
*
* repeated .onnx.OperatorSetIdProto opset_import = 8;
*/
// Element count for opset_import, read from whichever store is active:
// the plain list, or the field builder once one has been created.
public int getOpsetImportCount() {
if (opsetImportBuilder_ == null) {
return opsetImport_.size();
} else {
return opsetImportBuilder_.getCount();
}
}
/**
*
* The OperatorSets this model relies on.
* All ModelProtos MUST have at least one entry that
* specifies which version of the ONNX OperatorSet is
* being imported.
*
* All nodes in the ModelProto's graph will bind against the operator
* with the same-domain/same-op_type operator with the HIGHEST version
* in the referenced operator sets.
*
* The OperatorSets this model relies on.
* All ModelProtos MUST have at least one entry that
* specifies which version of the ONNX OperatorSet is
* being imported.
*
* All nodes in the ModelProto's graph will bind against the operator
* with the same-domain/same-op_type operator with the HIGHEST version
* in the referenced operator sets.
*
*
* repeated .onnx.OperatorSetIdProto opset_import = 8;
*/
// Replaces the opset_import element at `index`; rejects null values.
public Builder setOpsetImport(
int index, onnx.Onnx.OperatorSetIdProto value) {
if (opsetImportBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureOpsetImportIsMutable();
opsetImport_.set(index, value);
onChanged();
} else {
opsetImportBuilder_.setMessage(index, value);
}
return this;
}
/**
*
* The OperatorSets this model relies on.
* All ModelProtos MUST have at least one entry that
* specifies which version of the ONNX OperatorSet is
* being imported.
*
* All nodes in the ModelProto's graph will bind against the operator
* with the same-domain/same-op_type operator with the HIGHEST version
* in the referenced operator sets.
*
* The OperatorSets this model relies on.
* All ModelProtos MUST have at least one entry that
* specifies which version of the ONNX OperatorSet is
* being imported.
*
* All nodes in the ModelProto's graph will bind against the operator
* with the same-domain/same-op_type operator with the HIGHEST version
* in the referenced operator sets.
*
*
* repeated .onnx.OperatorSetIdProto opset_import = 8;
*/
// Appends one opset_import element; rejects null values.
public Builder addOpsetImport(onnx.Onnx.OperatorSetIdProto value) {
if (opsetImportBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureOpsetImportIsMutable();
opsetImport_.add(value);
onChanged();
} else {
opsetImportBuilder_.addMessage(value);
}
return this;
}
/**
*
* The OperatorSets this model relies on.
* All ModelProtos MUST have at least one entry that
* specifies which version of the ONNX OperatorSet is
* being imported.
*
* All nodes in the ModelProto's graph will bind against the operator
* with the same-domain/same-op_type operator with the HIGHEST version
* in the referenced operator sets.
*
*
* repeated .onnx.OperatorSetIdProto opset_import = 8;
*/
// Inserts one opset_import element at `index`; rejects null values.
public Builder addOpsetImport(
int index, onnx.Onnx.OperatorSetIdProto value) {
if (opsetImportBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureOpsetImportIsMutable();
opsetImport_.add(index, value);
onChanged();
} else {
opsetImportBuilder_.addMessage(index, value);
}
return this;
}
/**
*
* The OperatorSets this model relies on.
* All ModelProtos MUST have at least one entry that
* specifies which version of the ONNX OperatorSet is
* being imported.
*
* All nodes in the ModelProto's graph will bind against the operator
* with the same-domain/same-op_type operator with the HIGHEST version
* in the referenced operator sets.
*
* The OperatorSets this model relies on.
* All ModelProtos MUST have at least one entry that
* specifies which version of the ONNX OperatorSet is
* being imported.
*
* All nodes in the ModelProto's graph will bind against the operator
* with the same-domain/same-op_type operator with the HIGHEST version
* in the referenced operator sets.
*
* The OperatorSets this model relies on.
* All ModelProtos MUST have at least one entry that
* specifies which version of the ONNX OperatorSet is
* being imported.
*
* All nodes in the ModelProto's graph will bind against the operator
* with the same-domain/same-op_type operator with the HIGHEST version
* in the referenced operator sets.
*
* The OperatorSets this model relies on.
* All ModelProtos MUST have at least one entry that
* specifies which version of the ONNX OperatorSet is
* being imported.
*
* All nodes in the ModelProto's graph will bind against the operator
* with the same-domain/same-op_type operator with the HIGHEST version
* in the referenced operator sets.
*
* The OperatorSets this model relies on.
* All ModelProtos MUST have at least one entry that
* specifies which version of the ONNX OperatorSet is
* being imported.
*
* All nodes in the ModelProto's graph will bind against the operator
* with the same-domain/same-op_type operator with the HIGHEST version
* in the referenced operator sets.
*
* The OperatorSets this model relies on.
* All ModelProtos MUST have at least one entry that
* specifies which version of the ONNX OperatorSet is
* being imported.
*
* All nodes in the ModelProto's graph will bind against the operator
* with the same-domain/same-op_type operator with the HIGHEST version
* in the referenced operator sets.
*
*
* repeated .onnx.OperatorSetIdProto opset_import = 8;
*/
// Mutable nested builder for the element at `index`; forces creation of the
// repeated-field builder on first use.
public onnx.Onnx.OperatorSetIdProto.Builder getOpsetImportBuilder(
int index) {
return getOpsetImportFieldBuilder().getBuilder(index);
}
/**
*
* The OperatorSets this model relies on.
* All ModelProtos MUST have at least one entry that
* specifies which version of the ONNX OperatorSet is
* being imported.
*
* All nodes in the ModelProto's graph will bind against the operator
* with the same-domain/same-op_type operator with the HIGHEST version
* in the referenced operator sets.
*
*
* repeated .onnx.OperatorSetIdProto opset_import = 8;
*/
// Read-only view of the element at `index` without forcing builder creation.
public onnx.Onnx.OperatorSetIdProtoOrBuilder getOpsetImportOrBuilder(
int index) {
if (opsetImportBuilder_ == null) {
return opsetImport_.get(index); } else {
return opsetImportBuilder_.getMessageOrBuilder(index);
}
}
/**
 * The name of the framework or tool used to generate this model.
 * This field SHOULD be present to indicate which implementation/tool/framework
 * emitted the model.
 *
 * optional string producer_name = 2;
 * @return Whether the producerName field is set.
 */
public boolean hasProducerName() {
  // Bit 0x4 of bitField0_ tracks explicit presence of producer_name.
  return (bitField0_ & 0x00000004) == 0x00000004;
}
/**
 * The name of the framework or tool used to generate this model.
 * This field SHOULD be present to indicate which implementation/tool/framework
 * emitted the model.
 *
 * optional string producer_name = 2;
 * @return The bytes for producerName.
 */
public com.google.protobuf.ByteString
getProducerNameBytes() {
  java.lang.Object raw = producerName_;
  // Already cached as bytes? Hand it back directly.
  if (!(raw instanceof java.lang.String)) {
    return (com.google.protobuf.ByteString) raw;
  }
  // First bytes access: encode the String as UTF-8 and cache the result.
  com.google.protobuf.ByteString encoded =
      com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) raw);
  producerName_ = encoded;
  return encoded;
}
/**
 * The name of the framework or tool used to generate this model.
 * This field SHOULD be present to indicate which implementation/tool/framework
 * emitted the model.
 *
 * optional string producer_name = 2;
 * @param value The producerName to set.
 * @return This builder for chaining.
 */
public Builder setProducerName(
    java.lang.String value) {
  if (value == null) {
    throw new NullPointerException();
  }
  bitField0_ |= 0x00000004;  // record explicit presence of producer_name
  producerName_ = value;
  onChanged();
  return this;
}
/**
 * The name of the framework or tool used to generate this model.
 * This field SHOULD be present to indicate which implementation/tool/framework
 * emitted the model.
 *
 * optional string producer_name = 2;
 * @param value The bytes for producerName to set.
 * @return This builder for chaining.
 */
public Builder setProducerNameBytes(
    com.google.protobuf.ByteString value) {
  if (value == null) {
    throw new NullPointerException();
  }
  bitField0_ |= 0x00000004;  // record explicit presence of producer_name
  producerName_ = value;
  onChanged();
  return this;
}
private java.lang.Object producerVersion_ = "";
/**
 * The version of the framework or tool used to generate this model.
 * This field SHOULD be present to indicate which implementation/tool/framework
 * emitted the model.
 *
 * optional string producer_version = 3;
 * @return Whether the producerVersion field is set.
 */
public boolean hasProducerVersion() {
  // Bit 0x8 of bitField0_ tracks explicit presence of producer_version.
  return (bitField0_ & 0x00000008) == 0x00000008;
}
/**
 * The version of the framework or tool used to generate this model.
 * This field SHOULD be present to indicate which implementation/tool/framework
 * emitted the model.
 *
 * optional string producer_version = 3;
 * @return The bytes for producerVersion.
 */
public com.google.protobuf.ByteString
getProducerVersionBytes() {
  java.lang.Object raw = producerVersion_;
  // Already cached as bytes? Hand it back directly.
  if (!(raw instanceof java.lang.String)) {
    return (com.google.protobuf.ByteString) raw;
  }
  // First bytes access: encode the String as UTF-8 and cache the result.
  com.google.protobuf.ByteString encoded =
      com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) raw);
  producerVersion_ = encoded;
  return encoded;
}
/**
 * The version of the framework or tool used to generate this model.
 * This field SHOULD be present to indicate which implementation/tool/framework
 * emitted the model.
 *
 * optional string producer_version = 3;
 * @param value The producerVersion to set.
 * @return This builder for chaining.
 */
public Builder setProducerVersion(
    java.lang.String value) {
  if (value == null) {
    throw new NullPointerException();
  }
  bitField0_ |= 0x00000008;  // record explicit presence of producer_version
  producerVersion_ = value;
  onChanged();
  return this;
}
/**
 * The version of the framework or tool used to generate this model.
 * This field SHOULD be present to indicate which implementation/tool/framework
 * emitted the model.
 *
 * optional string producer_version = 3;
 * @param value The bytes for producerVersion to set.
 * @return This builder for chaining.
 */
public Builder setProducerVersionBytes(
    com.google.protobuf.ByteString value) {
  if (value == null) {
    throw new NullPointerException();
  }
  bitField0_ |= 0x00000008;  // record explicit presence of producer_version
  producerVersion_ = value;
  onChanged();
  return this;
}
private java.lang.Object domain_ = "";
/**
 * Domain name of the model.
 * We use reverse domain names as name space indicators. For example:
 * `com.facebook.fair` or `com.microsoft.cognitiveservices`
 *
 * Together with `model_version` and GraphProto.name, this forms the unique identity of
 * the graph.
 *
 * optional string domain = 4;
 * @return Whether the domain field is set.
 */
public boolean hasDomain() {
  // Bit 0x10 of bitField0_ tracks explicit presence of domain.
  return (bitField0_ & 0x00000010) == 0x00000010;
}
/**
 * Domain name of the model.
 * We use reverse domain names as name space indicators. For example:
 * `com.facebook.fair` or `com.microsoft.cognitiveservices`
 *
 * Together with `model_version` and GraphProto.name, this forms the unique identity of
 * the graph.
 *
 * optional string domain = 4;
 * @return The bytes for domain.
 */
public com.google.protobuf.ByteString
getDomainBytes() {
  java.lang.Object raw = domain_;
  // Already cached as bytes? Hand it back directly.
  if (!(raw instanceof java.lang.String)) {
    return (com.google.protobuf.ByteString) raw;
  }
  // First bytes access: encode the String as UTF-8 and cache the result.
  com.google.protobuf.ByteString encoded =
      com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) raw);
  domain_ = encoded;
  return encoded;
}
/**
 * Domain name of the model.
 * We use reverse domain names as name space indicators. For example:
 * `com.facebook.fair` or `com.microsoft.cognitiveservices`
 *
 * Together with `model_version` and GraphProto.name, this forms the unique identity of
 * the graph.
 *
 * optional string domain = 4;
 * @param value The domain to set.
 * @return This builder for chaining.
 */
public Builder setDomain(
    java.lang.String value) {
  if (value == null) {
    throw new NullPointerException();
  }
  bitField0_ |= 0x00000010;  // record explicit presence of domain
  domain_ = value;
  onChanged();
  return this;
}
/**
 * Domain name of the model.
 * We use reverse domain names as name space indicators. For example:
 * `com.facebook.fair` or `com.microsoft.cognitiveservices`
 *
 * Together with `model_version` and GraphProto.name, this forms the unique identity of
 * the graph.
 *
 * optional string domain = 4;
 * @param value The bytes for domain to set.
 * @return This builder for chaining.
 */
public Builder setDomainBytes(
    com.google.protobuf.ByteString value) {
  if (value == null) {
    throw new NullPointerException();
  }
  bitField0_ |= 0x00000010;  // record explicit presence of domain
  domain_ = value;
  onChanged();
  return this;
}
private long modelVersion_ ;
/**
 * The version of the graph encoded. See Version enum below.
 *
 * optional int64 model_version = 5;
 * @return Whether the modelVersion field is set.
 */
@java.lang.Override
public boolean hasModelVersion() {
  // Bit 0x20 of bitField0_ tracks explicit presence of model_version.
  return (bitField0_ & 0x00000020) == 0x00000020;
}
/**
 * The version of the graph encoded. See Version enum below.
 *
 * optional int64 model_version = 5;
 * @return The modelVersion.
 */
@java.lang.Override
public long getModelVersion() {
  // Plain read of the backing field; 0 when never set.
  final long version = modelVersion_;
  return version;
}
/**
 * The version of the graph encoded. See Version enum below.
 *
 * optional int64 model_version = 5;
 * @param value The modelVersion to set.
 * @return This builder for chaining.
 */
public Builder setModelVersion(long value) {
  bitField0_ |= 0x00000020;  // record explicit presence of model_version
  modelVersion_ = value;
  onChanged();
  return this;
}
/**
 * Training-specific information. Sequentially executing all stored
 * `TrainingInfoProto.algorithm`s and assigning their outputs following
 * the corresponding `TrainingInfoProto.update_binding`s is one training
 * iteration. Similarly, to initialize the model
 * (as if training hasn't happened), the user should sequentially execute
 * all stored `TrainingInfoProto.initialization`s and assigns their outputs
 * using `TrainingInfoProto.initialization_binding`s.
 *
 * If this field is empty, the training behavior of the model is undefined.
 *
 * repeated .onnx.TrainingInfoProto training_info = 20;
 */
public int getTrainingInfoCount() {
  // Count from whichever representation is live: nested builder or raw list.
  return trainingInfoBuilder_ == null
      ? trainingInfo_.size()
      : trainingInfoBuilder_.getCount();
}
/**
 * Training-specific information. Sequentially executing all stored
 * `TrainingInfoProto.algorithm`s and assigning their outputs following
 * the corresponding `TrainingInfoProto.update_binding`s is one training
 * iteration. Similarly, to initialize the model
 * (as if training hasn't happened), the user should sequentially execute
 * all stored `TrainingInfoProto.initialization`s and assigns their outputs
 * using `TrainingInfoProto.initialization_binding`s.
 *
 * If this field is empty, the training behavior of the model is undefined.
 *
 * repeated .onnx.TrainingInfoProto training_info = 20;
 */
public Builder setTrainingInfo(
    int index, onnx.Onnx.TrainingInfoProto value) {
  if (trainingInfoBuilder_ != null) {
    // A builder view exists — route the write through it.
    trainingInfoBuilder_.setMessage(index, value);
    return this;
  }
  if (value == null) {
    throw new NullPointerException();
  }
  ensureTrainingInfoIsMutable();
  trainingInfo_.set(index, value);
  onChanged();
  return this;
}
/**
 * Training-specific information. Sequentially executing all stored
 * `TrainingInfoProto.algorithm`s and assigning their outputs following
 * the corresponding `TrainingInfoProto.update_binding`s is one training
 * iteration. Similarly, to initialize the model
 * (as if training hasn't happened), the user should sequentially execute
 * all stored `TrainingInfoProto.initialization`s and assigns their outputs
 * using `TrainingInfoProto.initialization_binding`s.
 *
 * If this field is empty, the training behavior of the model is undefined.
 *
 * repeated .onnx.TrainingInfoProto training_info = 20;
 */
public Builder addTrainingInfo(onnx.Onnx.TrainingInfoProto value) {
  if (trainingInfoBuilder_ != null) {
    // A builder view exists — route the append through it.
    trainingInfoBuilder_.addMessage(value);
    return this;
  }
  if (value == null) {
    throw new NullPointerException();
  }
  ensureTrainingInfoIsMutable();
  trainingInfo_.add(value);
  onChanged();
  return this;
}
/**
 * Training-specific information. Sequentially executing all stored
 * `TrainingInfoProto.algorithm`s and assigning their outputs following
 * the corresponding `TrainingInfoProto.update_binding`s is one training
 * iteration. Similarly, to initialize the model
 * (as if training hasn't happened), the user should sequentially execute
 * all stored `TrainingInfoProto.initialization`s and assigns their outputs
 * using `TrainingInfoProto.initialization_binding`s.
 *
 * If this field is empty, the training behavior of the model is undefined.
 *
 * repeated .onnx.TrainingInfoProto training_info = 20;
 */
public Builder addTrainingInfo(
    int index, onnx.Onnx.TrainingInfoProto value) {
  if (trainingInfoBuilder_ != null) {
    // A builder view exists — route the insert through it.
    trainingInfoBuilder_.addMessage(index, value);
    return this;
  }
  if (value == null) {
    throw new NullPointerException();
  }
  ensureTrainingInfoIsMutable();
  trainingInfo_.add(index, value);
  onChanged();
  return this;
}
/**
 * Training-specific information. Sequentially executing all stored
 * `TrainingInfoProto.algorithm`s and assigning their outputs following
 * the corresponding `TrainingInfoProto.update_binding`s is one training
 * iteration. Similarly, to initialize the model
 * (as if training hasn't happened), the user should sequentially execute
 * all stored `TrainingInfoProto.initialization`s and assigns their outputs
 * using `TrainingInfoProto.initialization_binding`s.
 *
 * If this field is empty, the training behavior of the model is undefined.
 *
 * repeated .onnx.TrainingInfoProto training_info = 20;
 */
// Returns a mutable sub-builder for the training_info element (field 20) at
// {@code index}, delegating to the repeated-field builder for this field.
public onnx.Onnx.TrainingInfoProto.Builder getTrainingInfoBuilder(
int index) {
return getTrainingInfoFieldBuilder().getBuilder(index);
}
/**
 * Training-specific information. Sequentially executing all stored
 * `TrainingInfoProto.algorithm`s and assigning their outputs following
 * the corresponding `TrainingInfoProto.update_binding`s is one training
 * iteration. Similarly, to initialize the model
 * (as if training hasn't happened), the user should sequentially execute
 * all stored `TrainingInfoProto.initialization`s and assigns their outputs
 * using `TrainingInfoProto.initialization_binding`s.
 *
 * If this field is empty, the training behavior of the model is undefined.
 *
 * repeated .onnx.TrainingInfoProto training_info = 20;
 */
public onnx.Onnx.TrainingInfoProtoOrBuilder getTrainingInfoOrBuilder(
    int index) {
  // Prefer the nested-builder view when one has been created; otherwise
  // serve the message straight from the backing list.
  if (trainingInfoBuilder_ != null) {
    return trainingInfoBuilder_.getMessageOrBuilder(index);
  }
  return trainingInfo_.get(index);
}
/**
 * A list of function protos local to the model.
 *
 * Name of the function "FunctionProto.name" should be unique within the domain "FunctionProto.domain".
 * In case of any conflicts the behavior (whether the model local functions are given higher priority,
 * or standard operator sets are given higher priority or this is treated as error) is defined by
 * the runtimes.
 *
 * The operator sets imported by FunctionProto should be compatible with the ones
 * imported by ModelProto and other model local FunctionProtos.
 * Example, if same operator set say 'A' is imported by a FunctionProto and ModelProto
 * or by 2 FunctionProtos then versions for the operator set may be different but,
 * the operator schema returned for op_type, domain, version combination
 * for both the versions should be same for every node in the function body.
 *
 * One FunctionProto can reference other FunctionProto in the model, however, recursive reference
 * is not allowed.
 *
 * repeated .onnx.FunctionProto functions = 25;
 */
public int getFunctionsCount() {
  // Count from whichever representation is live: nested builder or raw list.
  return functionsBuilder_ == null
      ? functions_.size()
      : functionsBuilder_.getCount();
}
/**
 * A list of function protos local to the model.
 *
 * Name of the function "FunctionProto.name" should be unique within the domain "FunctionProto.domain".
 * In case of any conflicts the behavior (whether the model local functions are given higher priority,
 * or standard operator sets are given higher priority or this is treated as error) is defined by
 * the runtimes.
 *
 * The operator sets imported by FunctionProto should be compatible with the ones
 * imported by ModelProto and other model local FunctionProtos.
 * Example, if same operator set say 'A' is imported by a FunctionProto and ModelProto
 * or by 2 FunctionProtos then versions for the operator set may be different but,
 * the operator schema returned for op_type, domain, version combination
 * for both the versions should be same for every node in the function body.
 *
 * One FunctionProto can reference other FunctionProto in the model, however, recursive reference
 * is not allowed.
 *
 * repeated .onnx.FunctionProto functions = 25;
 */
public Builder setFunctions(
    int index, onnx.Onnx.FunctionProto value) {
  if (functionsBuilder_ != null) {
    // A builder view exists — route the write through it.
    functionsBuilder_.setMessage(index, value);
    return this;
  }
  if (value == null) {
    throw new NullPointerException();
  }
  ensureFunctionsIsMutable();
  functions_.set(index, value);
  onChanged();
  return this;
}
/**
 * A list of function protos local to the model.
 *
 * Name of the function "FunctionProto.name" should be unique within the domain "FunctionProto.domain".
 * In case of any conflicts the behavior (whether the model local functions are given higher priority,
 * or standard operator sets are given higher priority or this is treated as error) is defined by
 * the runtimes.
 *
 * The operator sets imported by FunctionProto should be compatible with the ones
 * imported by ModelProto and other model local FunctionProtos.
 * Example, if same operator set say 'A' is imported by a FunctionProto and ModelProto
 * or by 2 FunctionProtos then versions for the operator set may be different but,
 * the operator schema returned for op_type, domain, version combination
 * for both the versions should be same for every node in the function body.
 *
 * One FunctionProto can reference other FunctionProto in the model, however, recursive reference
 * is not allowed.
 *
 * repeated .onnx.FunctionProto functions = 25;
 */
public Builder addFunctions(onnx.Onnx.FunctionProto value) {
  if (functionsBuilder_ != null) {
    // A builder view exists — route the append through it.
    functionsBuilder_.addMessage(value);
    return this;
  }
  if (value == null) {
    throw new NullPointerException();
  }
  ensureFunctionsIsMutable();
  functions_.add(value);
  onChanged();
  return this;
}
/**
 * A list of function protos local to the model.
 *
 * Name of the function "FunctionProto.name" should be unique within the domain "FunctionProto.domain".
 * In case of any conflicts the behavior (whether the model local functions are given higher priority,
 * or standard operator sets are given higher priority or this is treated as error) is defined by
 * the runtimes.
 *
 * The operator sets imported by FunctionProto should be compatible with the ones
 * imported by ModelProto and other model local FunctionProtos.
 * Example, if same operator set say 'A' is imported by a FunctionProto and ModelProto
 * or by 2 FunctionProtos then versions for the operator set may be different but,
 * the operator schema returned for op_type, domain, version combination
 * for both the versions should be same for every node in the function body.
 *
 * One FunctionProto can reference other FunctionProto in the model, however, recursive reference
 * is not allowed.
 *
 * repeated .onnx.FunctionProto functions = 25;
 */
public Builder addFunctions(
    int index, onnx.Onnx.FunctionProto value) {
  if (functionsBuilder_ != null) {
    // A builder view exists — route the insert through it.
    functionsBuilder_.addMessage(index, value);
    return this;
  }
  if (value == null) {
    throw new NullPointerException();
  }
  ensureFunctionsIsMutable();
  functions_.add(index, value);
  onChanged();
  return this;
}
/**
 * A list of function protos local to the model.
 *
 * Name of the function "FunctionProto.name" should be unique within the domain "FunctionProto.domain".
 * In case of any conflicts the behavior (whether the model local functions are given higher priority,
 * or standard operator sets are given higher priority or this is treated as error) is defined by
 * the runtimes.
 *
 * The operator sets imported by FunctionProto should be compatible with the ones
 * imported by ModelProto and other model local FunctionProtos.
 * Example, if same operator set say 'A' is imported by a FunctionProto and ModelProto
 * or by 2 FunctionProtos then versions for the operator set may be different but,
 * the operator schema returned for op_type, domain, version combination
 * for both the versions should be same for every node in the function body.
 *
 * One FunctionProto can reference other FunctionProto in the model, however, recursive reference
 * is not allowed.
 *
 * repeated .onnx.FunctionProto functions = 25;
 */
// Returns a mutable sub-builder for the functions element (field 25) at
// {@code index}, delegating to the repeated-field builder for this field.
public onnx.Onnx.FunctionProto.Builder getFunctionsBuilder(
int index) {
return getFunctionsFieldBuilder().getBuilder(index);
}
/**
* A list of function protos local to the model.
*
* "FunctionProto.name" must be unique within "FunctionProto.domain"; how a
* clash between a model-local function and a standard operator set is resolved
* (priority or error) is defined by the runtime.
* Operator sets imported by a FunctionProto must be compatible with the ones
* imported by the ModelProto and the other local FunctionProtos: for any
* shared operator set, the schema returned for an (op_type, domain, version)
* combination must be the same for every node in the function body.
* One FunctionProto can reference another FunctionProto in the model;
* recursive references are not allowed.
*
* <code>repeated .onnx.FunctionProto functions = 25;</code>
*/
public onnx.Onnx.FunctionProtoOrBuilder getFunctionsOrBuilder(
int index) {
if (functionsBuilder_ == null) {
return functions_.get(index); } else {
return functionsBuilder_.getMessageOrBuilder(index);
}
}
/**
 * A list of function protos local to the model.
 *
 * "FunctionProto.name" must be unique within "FunctionProto.domain"; how a
 * clash with standard operator sets is resolved is defined by the runtime.
 * Operator sets imported by a FunctionProto must be compatible with the ones
 * imported by the ModelProto and the other local FunctionProtos.
 * One FunctionProto can reference another FunctionProto in the model;
 * recursive references are not allowed.
 *
 * NOTE(review): the scraped source declared a raw {@code java.util.List};
 * the element type argument was stripped by the HTML scrape and is restored
 * here to the form protoc emits.
 *
 * <code>repeated .onnx.FunctionProto functions = 25;</code>
 *
 * @return builders for every element of the repeated "functions" field.
 */
public java.util.List<onnx.Onnx.FunctionProto.Builder>
     getFunctionsBuilderList() {
  return getFunctionsFieldBuilder().getBuilderList();
}
// Lazily creates the RepeatedFieldBuilderV3 that mediates all access to the
// repeated "functions" field. Ownership of functions_ transfers to the
// builder, so the plain list reference is nulled out afterwards; bit 0x400 of
// bitField0_ tells the builder whether the list was already mutable.
private com.google.protobuf.RepeatedFieldBuilderV3<
onnx.Onnx.FunctionProto, onnx.Onnx.FunctionProto.Builder, onnx.Onnx.FunctionProtoOrBuilder>
getFunctionsFieldBuilder() {
if (functionsBuilder_ == null) {
functionsBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3<
onnx.Onnx.FunctionProto, onnx.Onnx.FunctionProto.Builder, onnx.Onnx.FunctionProtoOrBuilder>(
functions_,
((bitField0_ & 0x00000400) != 0),
getParentForChildren(),
isClean());
functions_ = null;
}
return functionsBuilder_;
}
// Pure delegation to GeneratedMessageV3.Builder; emitted by protoc to pin
// the return type to this Builder.
@java.lang.Override
public final Builder setUnknownFields(
final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
// Pure delegation to GeneratedMessageV3.Builder; emitted by protoc to pin
// the return type to this Builder.
@java.lang.Override
public final Builder mergeUnknownFields(
final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:onnx.ModelProto)
}
// @@protoc_insertion_point(class_scope:onnx.ModelProto)
// Shared immutable default instance of onnx.ModelProto, created eagerly at
// class-load time and returned by every getDefaultInstance() call.
private static final onnx.Onnx.ModelProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new onnx.Onnx.ModelProto();
}
public static onnx.Onnx.ModelProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
// Parser singleton for ModelProto (deprecated in generated code in favor of
// parser()). NOTE(review): the scraped source lost the type arguments
// ("Parser PARSER = new AbstractParser()"); restored to the parameterized
// form protoc emits so the declaration type-checks.
@java.lang.Deprecated public static final com.google.protobuf.Parser<ModelProto>
    PARSER = new com.google.protobuf.AbstractParser<ModelProto>() {
  @java.lang.Override
  public ModelProto parsePartialFrom(
      com.google.protobuf.CodedInputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    Builder builder = newBuilder();
    try {
      builder.mergeFrom(input, extensionRegistry);
    } catch (com.google.protobuf.InvalidProtocolBufferException e) {
      // Attach whatever was parsed so far so callers can inspect it.
      throw e.setUnfinishedMessage(builder.buildPartial());
    } catch (com.google.protobuf.UninitializedMessageException e) {
      throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
    } catch (java.io.IOException e) {
      throw new com.google.protobuf.InvalidProtocolBufferException(e)
          .setUnfinishedMessage(builder.buildPartial());
    }
    return builder.buildPartial();
  }
};
/**
 * @return the type-specific parser for ModelProto messages.
 *     NOTE(review): return type argument restored; the scrape stripped it.
 */
public static com.google.protobuf.Parser<ModelProto> parser() {
  return PARSER;
}
// Instance-level parser accessor required by MessageLite; returns the shared
// PARSER. NOTE(review): return type argument restored; the scrape stripped it.
@java.lang.Override
public com.google.protobuf.Parser<ModelProto> getParserForType() {
  return PARSER;
}
// Instance-level accessor required by MessageLite; returns the shared default.
@java.lang.Override
public onnx.Onnx.ModelProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
public interface StringStringEntryProtoOrBuilder extends
    // @@protoc_insertion_point(interface_extends:onnx.StringStringEntryProto)
    com.google.protobuf.MessageOrBuilder {

  /**
   * <code>optional string key = 1;</code>
   * @return Whether the key field is set.
   */
  boolean hasKey();

  /**
   * <code>optional string key = 1;</code>
   * @return The key.
   */
  java.lang.String getKey();

  /**
   * <code>optional string key = 1;</code>
   * @return The bytes for key.
   */
  com.google.protobuf.ByteString getKeyBytes();

  /**
   * <code>optional string value = 2;</code>
   * @return Whether the value field is set.
   */
  boolean hasValue();

  /**
   * <code>optional string value = 2;</code>
   * @return The value.
   */
  java.lang.String getValue();

  /**
   * <code>optional string value = 2;</code>
   * @return The bytes for value.
   */
  com.google.protobuf.ByteString getValueBytes();
}
/**
*
* StringStringEntryProto follows the pattern for cross-proto-version maps.
* See https://developers.google.com/protocol-buffers/docs/proto3#maps
*
*
* Protobuf type {@code onnx.StringStringEntryProto}
*/
public static final class StringStringEntryProto extends
com.google.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:onnx.StringStringEntryProto)
StringStringEntryProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use StringStringEntryProto.newBuilder() to construct.
private StringStringEntryProto(com.google.protobuf.GeneratedMessageV3.Builder> builder) {
super(builder);
}
private StringStringEntryProto() {
key_ = "";
value_ = "";
}
@java.lang.Override
@SuppressWarnings({"unused"})
protected java.lang.Object newInstance(
UnusedPrivateParameter unused) {
return new StringStringEntryProto();
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return onnx.Onnx.internal_static_onnx_StringStringEntryProto_descriptor;
}
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return onnx.Onnx.internal_static_onnx_StringStringEntryProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
onnx.Onnx.StringStringEntryProto.class, onnx.Onnx.StringStringEntryProto.Builder.class);
}
private int bitField0_;
public static final int KEY_FIELD_NUMBER = 1;
@SuppressWarnings("serial")
private volatile java.lang.Object key_ = "";
/**
* optional string key = 1;
* @return Whether the key field is set.
*/
@java.lang.Override
public boolean hasKey() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* optional string key = 1;
* @return The key.
*/
@java.lang.Override
public java.lang.String getKey() {
java.lang.Object ref = key_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
key_ = s;
}
return s;
}
}
/**
* optional string key = 1;
* @return The bytes for key.
*/
@java.lang.Override
public com.google.protobuf.ByteString
getKeyBytes() {
java.lang.Object ref = key_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
key_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
public static final int VALUE_FIELD_NUMBER = 2;
@SuppressWarnings("serial")
private volatile java.lang.Object value_ = "";
/**
* optional string value = 2;
* @return Whether the value field is set.
*/
@java.lang.Override
public boolean hasValue() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* optional string value = 2;
* @return The value.
*/
@java.lang.Override
public java.lang.String getValue() {
java.lang.Object ref = value_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
value_ = s;
}
return s;
}
}
/**
* optional string value = 2;
* @return The bytes for value.
*/
@java.lang.Override
public com.google.protobuf.ByteString
getValueBytes() {
java.lang.Object ref = value_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
value_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (((bitField0_ & 0x00000001) != 0)) {
com.google.protobuf.GeneratedMessageV3.writeString(output, 1, key_);
}
if (((bitField0_ & 0x00000002) != 0)) {
com.google.protobuf.GeneratedMessageV3.writeString(output, 2, value_);
}
getUnknownFields().writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) != 0)) {
size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, key_);
}
if (((bitField0_ & 0x00000002) != 0)) {
size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, value_);
}
size += getUnknownFields().getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof onnx.Onnx.StringStringEntryProto)) {
return super.equals(obj);
}
onnx.Onnx.StringStringEntryProto other = (onnx.Onnx.StringStringEntryProto) obj;
if (hasKey() != other.hasKey()) return false;
if (hasKey()) {
if (!getKey()
.equals(other.getKey())) return false;
}
if (hasValue() != other.hasValue()) return false;
if (hasValue()) {
if (!getValue()
.equals(other.getValue())) return false;
}
if (!getUnknownFields().equals(other.getUnknownFields())) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (hasKey()) {
hash = (37 * hash) + KEY_FIELD_NUMBER;
hash = (53 * hash) + getKey().hashCode();
}
if (hasValue()) {
hash = (37 * hash) + VALUE_FIELD_NUMBER;
hash = (53 * hash) + getValue().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static onnx.Onnx.StringStringEntryProto parseFrom(
java.nio.ByteBuffer data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static onnx.Onnx.StringStringEntryProto parseFrom(
java.nio.ByteBuffer data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static onnx.Onnx.StringStringEntryProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static onnx.Onnx.StringStringEntryProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static onnx.Onnx.StringStringEntryProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static onnx.Onnx.StringStringEntryProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static onnx.Onnx.StringStringEntryProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static onnx.Onnx.StringStringEntryProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static onnx.Onnx.StringStringEntryProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static onnx.Onnx.StringStringEntryProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static onnx.Onnx.StringStringEntryProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static onnx.Onnx.StringStringEntryProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(onnx.Onnx.StringStringEntryProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
*
* StringStringEntryProto follows the pattern for cross-proto-version maps.
* See https://developers.google.com/protocol-buffers/docs/proto3#maps
*
*
* Protobuf type {@code onnx.StringStringEntryProto}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessageV3.Builder implements
// @@protoc_insertion_point(builder_implements:onnx.StringStringEntryProto)
onnx.Onnx.StringStringEntryProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return onnx.Onnx.internal_static_onnx_StringStringEntryProto_descriptor;
}
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return onnx.Onnx.internal_static_onnx_StringStringEntryProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
onnx.Onnx.StringStringEntryProto.class, onnx.Onnx.StringStringEntryProto.Builder.class);
}
// Construct using onnx.Onnx.StringStringEntryProto.newBuilder()
private Builder() {
}
private Builder(
com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
}
@java.lang.Override
public Builder clear() {
super.clear();
bitField0_ = 0;
key_ = "";
value_ = "";
return this;
}
@java.lang.Override
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return onnx.Onnx.internal_static_onnx_StringStringEntryProto_descriptor;
}
@java.lang.Override
public onnx.Onnx.StringStringEntryProto getDefaultInstanceForType() {
return onnx.Onnx.StringStringEntryProto.getDefaultInstance();
}
@java.lang.Override
public onnx.Onnx.StringStringEntryProto build() {
onnx.Onnx.StringStringEntryProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public onnx.Onnx.StringStringEntryProto buildPartial() {
onnx.Onnx.StringStringEntryProto result = new onnx.Onnx.StringStringEntryProto(this);
if (bitField0_ != 0) { buildPartial0(result); }
onBuilt();
return result;
}
private void buildPartial0(onnx.Onnx.StringStringEntryProto result) {
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) != 0)) {
result.key_ = key_;
to_bitField0_ |= 0x00000001;
}
if (((from_bitField0_ & 0x00000002) != 0)) {
result.value_ = value_;
to_bitField0_ |= 0x00000002;
}
result.bitField0_ |= to_bitField0_;
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
com.google.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
com.google.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
com.google.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
com.google.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
com.google.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof onnx.Onnx.StringStringEntryProto) {
return mergeFrom((onnx.Onnx.StringStringEntryProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(onnx.Onnx.StringStringEntryProto other) {
if (other == onnx.Onnx.StringStringEntryProto.getDefaultInstance()) return this;
if (other.hasKey()) {
key_ = other.key_;
bitField0_ |= 0x00000001;
onChanged();
}
if (other.hasValue()) {
value_ = other.value_;
bitField0_ |= 0x00000002;
onChanged();
}
this.mergeUnknownFields(other.getUnknownFields());
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
return true;
}
@java.lang.Override
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 10: {
key_ = input.readBytes();
bitField0_ |= 0x00000001;
break;
} // case 10
case 18: {
value_ = input.readBytes();
bitField0_ |= 0x00000002;
break;
} // case 18
default: {
if (!super.parseUnknownField(input, extensionRegistry, tag)) {
done = true; // was an endgroup tag
}
break;
} // default:
} // switch (tag)
} // while (!done)
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.unwrapIOException();
} finally {
onChanged();
} // finally
return this;
}
private int bitField0_;
private java.lang.Object key_ = "";
/**
* optional string key = 1;
* @return Whether the key field is set.
*/
public boolean hasKey() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
* optional string key = 1;
* @return The key.
*/
public java.lang.String getKey() {
java.lang.Object ref = key_;
if (!(ref instanceof java.lang.String)) {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
key_ = s;
}
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* optional string key = 1;
* @return The bytes for key.
*/
public com.google.protobuf.ByteString
getKeyBytes() {
java.lang.Object ref = key_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
key_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* optional string key = 1;
* @param value The key to set.
* @return This builder for chaining.
*/
public Builder setKey(
java.lang.String value) {
if (value == null) { throw new NullPointerException(); }
key_ = value;
bitField0_ |= 0x00000001;
onChanged();
return this;
}
/**
* optional string key = 1;
* @return This builder for chaining.
*/
public Builder clearKey() {
key_ = getDefaultInstance().getKey();
bitField0_ = (bitField0_ & ~0x00000001);
onChanged();
return this;
}
/**
* optional string key = 1;
* @param value The bytes for key to set.
* @return This builder for chaining.
*/
public Builder setKeyBytes(
com.google.protobuf.ByteString value) {
if (value == null) { throw new NullPointerException(); }
key_ = value;
bitField0_ |= 0x00000001;
onChanged();
return this;
}
private java.lang.Object value_ = "";
/**
* optional string value = 2;
* @return Whether the value field is set.
*/
public boolean hasValue() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
* optional string value = 2;
* @return The value.
*/
public java.lang.String getValue() {
java.lang.Object ref = value_;
if (!(ref instanceof java.lang.String)) {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
value_ = s;
}
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* optional string value = 2;
* @return The bytes for value.
*/
public com.google.protobuf.ByteString
getValueBytes() {
java.lang.Object ref = value_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
value_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* optional string value = 2;
* @param value The value to set.
* @return This builder for chaining.
*/
public Builder setValue(
java.lang.String value) {
if (value == null) { throw new NullPointerException(); }
value_ = value;
bitField0_ |= 0x00000002;
onChanged();
return this;
}
/**
* optional string value = 2;
* @return This builder for chaining.
*/
public Builder clearValue() {
value_ = getDefaultInstance().getValue();
bitField0_ = (bitField0_ & ~0x00000002);
onChanged();
return this;
}
/**
* optional string value = 2;
* @param value The bytes for value to set.
* @return This builder for chaining.
*/
public Builder setValueBytes(
com.google.protobuf.ByteString value) {
if (value == null) { throw new NullPointerException(); }
value_ = value;
bitField0_ |= 0x00000002;
onChanged();
return this;
}
@java.lang.Override
public final Builder setUnknownFields(
final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:onnx.StringStringEntryProto)
}
// @@protoc_insertion_point(class_scope:onnx.StringStringEntryProto)
private static final onnx.Onnx.StringStringEntryProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new onnx.Onnx.StringStringEntryProto();
}
public static onnx.Onnx.StringStringEntryProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
@java.lang.Deprecated public static final com.google.protobuf.Parser
PARSER = new com.google.protobuf.AbstractParser() {
@java.lang.Override
public StringStringEntryProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
Builder builder = newBuilder();
try {
builder.mergeFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(builder.buildPartial());
} catch (com.google.protobuf.UninitializedMessageException e) {
throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(e)
.setUnfinishedMessage(builder.buildPartial());
}
return builder.buildPartial();
}
};
public static com.google.protobuf.Parser parser() {
return PARSER;
}
@java.lang.Override
public com.google.protobuf.Parser getParserForType() {
return PARSER;
}
@java.lang.Override
public onnx.Onnx.StringStringEntryProto getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
public interface TensorAnnotationOrBuilder extends
    // @@protoc_insertion_point(interface_extends:onnx.TensorAnnotation)
    com.google.protobuf.MessageOrBuilder {

  /**
   * <code>optional string tensor_name = 1;</code>
   * @return Whether the tensorName field is set.
   */
  boolean hasTensorName();

  /**
   * <code>optional string tensor_name = 1;</code>
   * @return The tensorName.
   */
  java.lang.String getTensorName();

  /**
   * <code>optional string tensor_name = 1;</code>
   * @return The bytes for tensorName.
   */
  com.google.protobuf.ByteString getTensorNameBytes();

  // NOTE(review): the pasted source carried five copies of the javadoc below
  // but only the last declaration — the scrape evidently dropped the other
  // four accessor signatures. They are reconstructed here to match the
  // @Override implementations visible in onnx.Onnx.TensorAnnotation; confirm
  // against the original protoc output.

  /**
   * <pre>
   * &lt;key, value&gt; pairs to annotate tensor specified by &lt;tensor_name&gt; above.
   * The keys used in the mapping below must be pre-defined in ONNX spec.
   * For example, for 8-bit linear quantization case, 'SCALE_TENSOR',
   * 'ZERO_POINT_TENSOR' will be pre-defined as quantization parameter keys.
   * </pre>
   *
   * <code>repeated .onnx.StringStringEntryProto quant_parameter_tensor_names = 2;</code>
   */
  java.util.List<onnx.Onnx.StringStringEntryProto>
      getQuantParameterTensorNamesList();
  /** Indexed accessor for the same repeated field. */
  onnx.Onnx.StringStringEntryProto getQuantParameterTensorNames(int index);
  /** Element count of the same repeated field. */
  int getQuantParameterTensorNamesCount();
  /** Read-only views (messages or pending builders) over the same field. */
  java.util.List<? extends onnx.Onnx.StringStringEntryProtoOrBuilder>
      getQuantParameterTensorNamesOrBuilderList();
  /** Indexed read-only view over the same field. */
  onnx.Onnx.StringStringEntryProtoOrBuilder getQuantParameterTensorNamesOrBuilder(
      int index);
}
/**
* Protobuf type {@code onnx.TensorAnnotation}
*/
public static final class TensorAnnotation extends
com.google.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:onnx.TensorAnnotation)
TensorAnnotationOrBuilder {
private static final long serialVersionUID = 0L;
// Use TensorAnnotation.newBuilder() to construct.
// NOTE(review): restored the wildcard type argument the scrape stripped
// ("GeneratedMessageV3.Builder>" in the pasted source is not valid Java).
private TensorAnnotation(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
  super(builder);
}
// Default instance state: empty tensor name, no quantization annotations.
private TensorAnnotation() {
tensorName_ = "";
quantParameterTensorNames_ = java.util.Collections.emptyList();
}
// Reflection hook used by the protobuf runtime to allocate fresh instances.
@java.lang.Override
@SuppressWarnings({"unused"})
protected java.lang.Object newInstance(
UnusedPrivateParameter unused) {
return new TensorAnnotation();
}
// Descriptor for onnx.TensorAnnotation, resolved from the file descriptor.
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return onnx.Onnx.internal_static_onnx_TensorAnnotation_descriptor;
}
// Wires the generated field accessors to this message class and its Builder.
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return onnx.Onnx.internal_static_onnx_TensorAnnotation_fieldAccessorTable
.ensureFieldAccessorsInitialized(
onnx.Onnx.TensorAnnotation.class, onnx.Onnx.TensorAnnotation.Builder.class);
}
// Bit 0 tracks presence of tensor_name.
private int bitField0_;
public static final int TENSOR_NAME_FIELD_NUMBER = 1;
// Holds either a String or a ByteString; decoded lazily (see getTensorName).
@SuppressWarnings("serial")
private volatile java.lang.Object tensorName_ = "";
/**
* <code>optional string tensor_name = 1;</code>
* @return Whether the tensorName field is set.
*/
@java.lang.Override
public boolean hasTensorName() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
 * <code>optional string tensor_name = 1;</code>
 * @return The tensorName.
 */
@java.lang.Override
public java.lang.String getTensorName() {
  java.lang.Object raw = tensorName_;
  if (!(raw instanceof java.lang.String)) {
    com.google.protobuf.ByteString bytes =
        (com.google.protobuf.ByteString) raw;
    java.lang.String decoded = bytes.toStringUtf8();
    if (bytes.isValidUtf8()) {
      // Cache the decoded form only when decoding was lossless.
      tensorName_ = decoded;
    }
    return decoded;
  }
  return (java.lang.String) raw;
}
/**
 * <code>optional string tensor_name = 1;</code>
 * @return The bytes for tensorName.
 */
@java.lang.Override
public com.google.protobuf.ByteString getTensorNameBytes() {
  java.lang.Object raw = tensorName_;
  if (!(raw instanceof java.lang.String)) {
    return (com.google.protobuf.ByteString) raw;
  }
  com.google.protobuf.ByteString encoded =
      com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) raw);
  // Cache the byte form so repeated calls skip re-encoding.
  tensorName_ = encoded;
  return encoded;
}
public static final int QUANT_PARAMETER_TENSOR_NAMES_FIELD_NUMBER = 2;
@SuppressWarnings("serial")
// Restored the element type parameter (stripped to a raw List in the mangled copy).
private java.util.List<onnx.Onnx.StringStringEntryProto> quantParameterTensorNames_;
/**
 * <pre>
 * &lt;key, value&gt; pairs to annotate tensor specified by &lt;tensor_name&gt; above.
 * The keys used in the mapping below must be pre-defined in ONNX spec.
 * For example, for 8-bit linear quantization case, 'SCALE_TENSOR', 'ZERO_POINT_TENSOR' will be pre-defined as
 * quantization parameter keys.
 * </pre>
 *
 * <code>repeated .onnx.StringStringEntryProto quant_parameter_tensor_names = 2;</code>
 */
@java.lang.Override
public java.util.List<onnx.Onnx.StringStringEntryProto> getQuantParameterTensorNamesList() {
  // Restored: this standard generated accessor was lost in the mangled copy
  // but is required by equals() below (and by callers of the public API).
  return quantParameterTensorNames_;
}
/**
 * OrBuilder view of the list above.
 *
 * <code>repeated .onnx.StringStringEntryProto quant_parameter_tensor_names = 2;</code>
 */
@java.lang.Override
public java.util.List<? extends onnx.Onnx.StringStringEntryProtoOrBuilder>
    getQuantParameterTensorNamesOrBuilderList() {
  return quantParameterTensorNames_;
}
/**
 * Number of quant_parameter_tensor_names entries.
 *
 * <code>repeated .onnx.StringStringEntryProto quant_parameter_tensor_names = 2;</code>
 */
@java.lang.Override
public int getQuantParameterTensorNamesCount() {
  return quantParameterTensorNames_.size();
}
/**
 * Entry at {@code index}.
 *
 * <code>repeated .onnx.StringStringEntryProto quant_parameter_tensor_names = 2;</code>
 */
@java.lang.Override
public onnx.Onnx.StringStringEntryProto getQuantParameterTensorNames(int index) {
  return quantParameterTensorNames_.get(index);
}
/**
 * OrBuilder view of the entry at {@code index}.
 *
 * <code>repeated .onnx.StringStringEntryProto quant_parameter_tensor_names = 2;</code>
 */
@java.lang.Override
public onnx.Onnx.StringStringEntryProtoOrBuilder getQuantParameterTensorNamesOrBuilder(
    int index) {
  return quantParameterTensorNames_.get(index);
}
// Memoized isInitialized() result: -1 = not computed, 0 = false, 1 = true.
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
// This message has no required fields, so it is always initialized.
memoizedIsInitialized = 1;
return true;
}
// Serializes the message: tensor_name (field 1) only when its has-bit is
// set, every quant_parameter_tensor_names entry (field 2), then any
// unknown fields retained from parsing.
@java.lang.Override
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (((bitField0_ & 0x00000001) != 0)) {
com.google.protobuf.GeneratedMessageV3.writeString(output, 1, tensorName_);
}
for (int i = 0; i < quantParameterTensorNames_.size(); i++) {
output.writeMessage(2, quantParameterTensorNames_.get(i));
}
getUnknownFields().writeTo(output);
}
// Computes (and memoizes in memoizedSize) the serialized byte size,
// mirroring the field order used by writeTo().
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) != 0)) {
size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, tensorName_);
}
for (int i = 0; i < quantParameterTensorNames_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(2, quantParameterTensorNames_.get(i));
}
size += getUnknownFields().getSerializedSize();
memoizedSize = size;
return size;
}
// Field-by-field equality: tensor_name presence and value, the repeated
// quant_parameter_tensor_names list, and unknown fields must all match.
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof onnx.Onnx.TensorAnnotation)) {
return super.equals(obj);
}
onnx.Onnx.TensorAnnotation other = (onnx.Onnx.TensorAnnotation) obj;
if (hasTensorName() != other.hasTensorName()) return false;
if (hasTensorName()) {
if (!getTensorName()
.equals(other.getTensorName())) return false;
}
if (!getQuantParameterTensorNamesList()
.equals(other.getQuantParameterTensorNamesList())) return false;
if (!getUnknownFields().equals(other.getUnknownFields())) return false;
return true;
}
// Memoized hash consistent with equals(): mixes the descriptor, each set
// field tagged by its field number, and the unknown fields.
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (hasTensorName()) {
hash = (37 * hash) + TENSOR_NAME_FIELD_NUMBER;
hash = (53 * hash) + getTensorName().hashCode();
}
if (getQuantParameterTensorNamesCount() > 0) {
hash = (37 * hash) + QUANT_PARAMETER_TENSOR_NAMES_FIELD_NUMBER;
hash = (53 * hash) + getQuantParameterTensorNamesList().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
// Standard generated parseFrom/parseDelimitedFrom overloads. Each
// delegates to PARSER (byte-based inputs) or to the GeneratedMessageV3
// IO helpers (stream/CodedInputStream inputs, which translate
// InvalidProtocolBufferException wrapping for IOExceptions).
public static onnx.Onnx.TensorAnnotation parseFrom(
java.nio.ByteBuffer data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static onnx.Onnx.TensorAnnotation parseFrom(
java.nio.ByteBuffer data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static onnx.Onnx.TensorAnnotation parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static onnx.Onnx.TensorAnnotation parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static onnx.Onnx.TensorAnnotation parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static onnx.Onnx.TensorAnnotation parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static onnx.Onnx.TensorAnnotation parseFrom(java.io.InputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static onnx.Onnx.TensorAnnotation parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
// Delimited variants read a varint length prefix before the message body.
public static onnx.Onnx.TensorAnnotation parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static onnx.Onnx.TensorAnnotation parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static onnx.Onnx.TensorAnnotation parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static onnx.Onnx.TensorAnnotation parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
// Builder factories: fresh builders are cheapest when created from the
// default instance; toBuilder() seeds a new builder with this message's
// field values.
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(onnx.Onnx.TensorAnnotation prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
// Avoid a needless mergeFrom when this is already the default instance.
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
 * Builder for {@code onnx.TensorAnnotation}.
 *
 * Protobuf type {@code onnx.TensorAnnotation}
 */
public static final class Builder extends
    com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
    // @@protoc_insertion_point(builder_implements:onnx.TensorAnnotation)
    onnx.Onnx.TensorAnnotationOrBuilder {
  public static final com.google.protobuf.Descriptors.Descriptor
      getDescriptor() {
    return onnx.Onnx.internal_static_onnx_TensorAnnotation_descriptor;
  }

  @java.lang.Override
  protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
      internalGetFieldAccessorTable() {
    return onnx.Onnx.internal_static_onnx_TensorAnnotation_fieldAccessorTable
        .ensureFieldAccessorsInitialized(
            onnx.Onnx.TensorAnnotation.class, onnx.Onnx.TensorAnnotation.Builder.class);
  }

  // Construct using onnx.Onnx.TensorAnnotation.newBuilder()
  private Builder() {
  }

  private Builder(
      com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
    super(parent);
  }

  @java.lang.Override
  public Builder clear() {
    super.clear();
    bitField0_ = 0;
    tensorName_ = "";
    if (quantParameterTensorNamesBuilder_ == null) {
      quantParameterTensorNames_ = java.util.Collections.emptyList();
    } else {
      quantParameterTensorNames_ = null;
      quantParameterTensorNamesBuilder_.clear();
    }
    bitField0_ = (bitField0_ & ~0x00000002);
    return this;
  }

  @java.lang.Override
  public com.google.protobuf.Descriptors.Descriptor
      getDescriptorForType() {
    return onnx.Onnx.internal_static_onnx_TensorAnnotation_descriptor;
  }

  @java.lang.Override
  public onnx.Onnx.TensorAnnotation getDefaultInstanceForType() {
    return onnx.Onnx.TensorAnnotation.getDefaultInstance();
  }

  @java.lang.Override
  public onnx.Onnx.TensorAnnotation build() {
    onnx.Onnx.TensorAnnotation result = buildPartial();
    if (!result.isInitialized()) {
      throw newUninitializedMessageException(result);
    }
    return result;
  }

  @java.lang.Override
  public onnx.Onnx.TensorAnnotation buildPartial() {
    onnx.Onnx.TensorAnnotation result = new onnx.Onnx.TensorAnnotation(this);
    buildPartialRepeatedFields(result);
    if (bitField0_ != 0) { buildPartial0(result); }
    onBuilt();
    return result;
  }

  // Transfers the repeated field to the result; when no field builder is in
  // use, the builder's list is frozen (unmodifiable) and shared.
  private void buildPartialRepeatedFields(onnx.Onnx.TensorAnnotation result) {
    if (quantParameterTensorNamesBuilder_ == null) {
      if (((bitField0_ & 0x00000002) != 0)) {
        quantParameterTensorNames_ = java.util.Collections.unmodifiableList(quantParameterTensorNames_);
        bitField0_ = (bitField0_ & ~0x00000002);
      }
      result.quantParameterTensorNames_ = quantParameterTensorNames_;
    } else {
      result.quantParameterTensorNames_ = quantParameterTensorNamesBuilder_.build();
    }
  }

  // Copies singular fields whose has-bits are set into the result.
  private void buildPartial0(onnx.Onnx.TensorAnnotation result) {
    int from_bitField0_ = bitField0_;
    int to_bitField0_ = 0;
    if (((from_bitField0_ & 0x00000001) != 0)) {
      result.tensorName_ = tensorName_;
      to_bitField0_ |= 0x00000001;
    }
    result.bitField0_ |= to_bitField0_;
  }

  @java.lang.Override
  public Builder clone() {
    return super.clone();
  }

  @java.lang.Override
  public Builder setField(
      com.google.protobuf.Descriptors.FieldDescriptor field,
      java.lang.Object value) {
    return super.setField(field, value);
  }

  @java.lang.Override
  public Builder clearField(
      com.google.protobuf.Descriptors.FieldDescriptor field) {
    return super.clearField(field);
  }

  @java.lang.Override
  public Builder clearOneof(
      com.google.protobuf.Descriptors.OneofDescriptor oneof) {
    return super.clearOneof(oneof);
  }

  @java.lang.Override
  public Builder setRepeatedField(
      com.google.protobuf.Descriptors.FieldDescriptor field,
      int index, java.lang.Object value) {
    return super.setRepeatedField(field, index, value);
  }

  @java.lang.Override
  public Builder addRepeatedField(
      com.google.protobuf.Descriptors.FieldDescriptor field,
      java.lang.Object value) {
    return super.addRepeatedField(field, value);
  }

  @java.lang.Override
  public Builder mergeFrom(com.google.protobuf.Message other) {
    if (other instanceof onnx.Onnx.TensorAnnotation) {
      return mergeFrom((onnx.Onnx.TensorAnnotation)other);
    } else {
      super.mergeFrom(other);
      return this;
    }
  }

  public Builder mergeFrom(onnx.Onnx.TensorAnnotation other) {
    if (other == onnx.Onnx.TensorAnnotation.getDefaultInstance()) return this;
    if (other.hasTensorName()) {
      tensorName_ = other.tensorName_;
      bitField0_ |= 0x00000001;
      onChanged();
    }
    if (quantParameterTensorNamesBuilder_ == null) {
      if (!other.quantParameterTensorNames_.isEmpty()) {
        if (quantParameterTensorNames_.isEmpty()) {
          // Share the other message's immutable list instead of copying.
          quantParameterTensorNames_ = other.quantParameterTensorNames_;
          bitField0_ = (bitField0_ & ~0x00000002);
        } else {
          ensureQuantParameterTensorNamesIsMutable();
          quantParameterTensorNames_.addAll(other.quantParameterTensorNames_);
        }
        onChanged();
      }
    } else {
      if (!other.quantParameterTensorNames_.isEmpty()) {
        if (quantParameterTensorNamesBuilder_.isEmpty()) {
          quantParameterTensorNamesBuilder_.dispose();
          quantParameterTensorNamesBuilder_ = null;
          quantParameterTensorNames_ = other.quantParameterTensorNames_;
          bitField0_ = (bitField0_ & ~0x00000002);
          quantParameterTensorNamesBuilder_ =
            com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
               getQuantParameterTensorNamesFieldBuilder() : null;
        } else {
          quantParameterTensorNamesBuilder_.addAllMessages(other.quantParameterTensorNames_);
        }
      }
    }
    this.mergeUnknownFields(other.getUnknownFields());
    onChanged();
    return this;
  }

  @java.lang.Override
  public final boolean isInitialized() {
    return true;
  }

  @java.lang.Override
  public Builder mergeFrom(
      com.google.protobuf.CodedInputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    if (extensionRegistry == null) {
      throw new java.lang.NullPointerException();
    }
    try {
      boolean done = false;
      while (!done) {
        int tag = input.readTag();
        switch (tag) {
          case 0:
            done = true;
            break;
          case 10: {
            tensorName_ = input.readBytes();
            bitField0_ |= 0x00000001;
            break;
          } // case 10
          case 18: {
            onnx.Onnx.StringStringEntryProto m =
                input.readMessage(
                    onnx.Onnx.StringStringEntryProto.PARSER,
                    extensionRegistry);
            if (quantParameterTensorNamesBuilder_ == null) {
              ensureQuantParameterTensorNamesIsMutable();
              quantParameterTensorNames_.add(m);
            } else {
              quantParameterTensorNamesBuilder_.addMessage(m);
            }
            break;
          } // case 18
          default: {
            if (!super.parseUnknownField(input, extensionRegistry, tag)) {
              done = true; // was an endgroup tag
            }
            break;
          } // default:
        } // switch (tag)
      } // while (!done)
    } catch (com.google.protobuf.InvalidProtocolBufferException e) {
      throw e.unwrapIOException();
    } finally {
      onChanged();
    } // finally
    return this;
  }
  private int bitField0_;

  private java.lang.Object tensorName_ = "";
  /**
   * <code>optional string tensor_name = 1;</code>
   * @return Whether the tensorName field is set.
   */
  public boolean hasTensorName() {
    return ((bitField0_ & 0x00000001) != 0);
  }
  /**
   * <code>optional string tensor_name = 1;</code>
   * @return The tensorName.
   */
  public java.lang.String getTensorName() {
    java.lang.Object ref = tensorName_;
    if (!(ref instanceof java.lang.String)) {
      com.google.protobuf.ByteString bs =
          (com.google.protobuf.ByteString) ref;
      java.lang.String s = bs.toStringUtf8();
      if (bs.isValidUtf8()) {
        tensorName_ = s;
      }
      return s;
    } else {
      return (java.lang.String) ref;
    }
  }
  /**
   * <code>optional string tensor_name = 1;</code>
   * @return The bytes for tensorName.
   */
  public com.google.protobuf.ByteString
      getTensorNameBytes() {
    java.lang.Object ref = tensorName_;
    if (ref instanceof String) {
      com.google.protobuf.ByteString b =
          com.google.protobuf.ByteString.copyFromUtf8(
              (java.lang.String) ref);
      tensorName_ = b;
      return b;
    } else {
      return (com.google.protobuf.ByteString) ref;
    }
  }
  /**
   * <code>optional string tensor_name = 1;</code>
   * @param value The tensorName to set.
   * @return This builder for chaining.
   */
  public Builder setTensorName(
      java.lang.String value) {
    if (value == null) { throw new NullPointerException(); }
    tensorName_ = value;
    bitField0_ |= 0x00000001;
    onChanged();
    return this;
  }
  /**
   * <code>optional string tensor_name = 1;</code>
   * @return This builder for chaining.
   */
  public Builder clearTensorName() {
    tensorName_ = getDefaultInstance().getTensorName();
    bitField0_ = (bitField0_ & ~0x00000001);
    onChanged();
    return this;
  }
  /**
   * <code>optional string tensor_name = 1;</code>
   * @param value The bytes for tensorName to set.
   * @return This builder for chaining.
   */
  public Builder setTensorNameBytes(
      com.google.protobuf.ByteString value) {
    if (value == null) { throw new NullPointerException(); }
    tensorName_ = value;
    bitField0_ |= 0x00000001;
    onChanged();
    return this;
  }

  // Restored the element type parameter (stripped in the mangled copy).
  private java.util.List<onnx.Onnx.StringStringEntryProto> quantParameterTensorNames_ =
    java.util.Collections.emptyList();
  // Copies the (possibly shared/immutable) list before the first mutation;
  // bit 0x00000002 records that the builder owns a mutable copy.
  private void ensureQuantParameterTensorNamesIsMutable() {
    if (!((bitField0_ & 0x00000002) != 0)) {
      quantParameterTensorNames_ = new java.util.ArrayList<onnx.Onnx.StringStringEntryProto>(quantParameterTensorNames_);
      bitField0_ |= 0x00000002;
    }
  }

  private com.google.protobuf.RepeatedFieldBuilderV3<
      onnx.Onnx.StringStringEntryProto, onnx.Onnx.StringStringEntryProto.Builder, onnx.Onnx.StringStringEntryProtoOrBuilder> quantParameterTensorNamesBuilder_;

  /**
   * <pre>
   * &lt;key, value&gt; pairs to annotate tensor specified by &lt;tensor_name&gt; above.
   * The keys used in the mapping below must be pre-defined in ONNX spec.
   * For example, for 8-bit linear quantization case, 'SCALE_TENSOR', 'ZERO_POINT_TENSOR' will be pre-defined as
   * quantization parameter keys.
   * </pre>
   *
   * <code>repeated .onnx.StringStringEntryProto quant_parameter_tensor_names = 2;</code>
   */
  public int getQuantParameterTensorNamesCount() {
    if (quantParameterTensorNamesBuilder_ == null) {
      return quantParameterTensorNames_.size();
    } else {
      return quantParameterTensorNamesBuilder_.getCount();
    }
  }
  /**
   * Replaces the entry at {@code index}.
   *
   * <code>repeated .onnx.StringStringEntryProto quant_parameter_tensor_names = 2;</code>
   */
  public Builder setQuantParameterTensorNames(
      int index, onnx.Onnx.StringStringEntryProto value) {
    if (quantParameterTensorNamesBuilder_ == null) {
      if (value == null) {
        throw new NullPointerException();
      }
      ensureQuantParameterTensorNamesIsMutable();
      quantParameterTensorNames_.set(index, value);
      onChanged();
    } else {
      quantParameterTensorNamesBuilder_.setMessage(index, value);
    }
    return this;
  }
  /**
   * Appends an entry.
   *
   * <code>repeated .onnx.StringStringEntryProto quant_parameter_tensor_names = 2;</code>
   */
  public Builder addQuantParameterTensorNames(onnx.Onnx.StringStringEntryProto value) {
    if (quantParameterTensorNamesBuilder_ == null) {
      if (value == null) {
        throw new NullPointerException();
      }
      ensureQuantParameterTensorNamesIsMutable();
      quantParameterTensorNames_.add(value);
      onChanged();
    } else {
      quantParameterTensorNamesBuilder_.addMessage(value);
    }
    return this;
  }
  /**
   * Inserts an entry at {@code index}.
   *
   * <code>repeated .onnx.StringStringEntryProto quant_parameter_tensor_names = 2;</code>
   */
  public Builder addQuantParameterTensorNames(
      int index, onnx.Onnx.StringStringEntryProto value) {
    if (quantParameterTensorNamesBuilder_ == null) {
      if (value == null) {
        throw new NullPointerException();
      }
      ensureQuantParameterTensorNamesIsMutable();
      quantParameterTensorNames_.add(index, value);
      onChanged();
    } else {
      quantParameterTensorNamesBuilder_.addMessage(index, value);
    }
    return this;
  }
  /**
   * Returns a sub-builder for the entry at {@code index}; forces the
   * RepeatedFieldBuilderV3 into use.
   *
   * <code>repeated .onnx.StringStringEntryProto quant_parameter_tensor_names = 2;</code>
   */
  public onnx.Onnx.StringStringEntryProto.Builder getQuantParameterTensorNamesBuilder(
      int index) {
    return getQuantParameterTensorNamesFieldBuilder().getBuilder(index);
  }
  /**
   * OrBuilder view of the entry at {@code index}.
   *
   * <code>repeated .onnx.StringStringEntryProto quant_parameter_tensor_names = 2;</code>
   */
  public onnx.Onnx.StringStringEntryProtoOrBuilder getQuantParameterTensorNamesOrBuilder(
      int index) {
    if (quantParameterTensorNamesBuilder_ == null) {
      return quantParameterTensorNames_.get(index);
    } else {
      return quantParameterTensorNamesBuilder_.getMessageOrBuilder(index);
    }
  }
  /**
   * Returns the list of sub-builders; forces the RepeatedFieldBuilderV3
   * into use.
   *
   * <code>repeated .onnx.StringStringEntryProto quant_parameter_tensor_names = 2;</code>
   */
  public java.util.List<onnx.Onnx.StringStringEntryProto.Builder>
      getQuantParameterTensorNamesBuilderList() {
    return getQuantParameterTensorNamesFieldBuilder().getBuilderList();
  }
  // Lazily creates the field builder, handing it the current list; the
  // plain list reference is nulled afterwards so only one representation
  // is live at a time.
  private com.google.protobuf.RepeatedFieldBuilderV3<
      onnx.Onnx.StringStringEntryProto, onnx.Onnx.StringStringEntryProto.Builder, onnx.Onnx.StringStringEntryProtoOrBuilder>
      getQuantParameterTensorNamesFieldBuilder() {
    if (quantParameterTensorNamesBuilder_ == null) {
      quantParameterTensorNamesBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3<
          onnx.Onnx.StringStringEntryProto, onnx.Onnx.StringStringEntryProto.Builder, onnx.Onnx.StringStringEntryProtoOrBuilder>(
              quantParameterTensorNames_,
              ((bitField0_ & 0x00000002) != 0),
              getParentForChildren(),
              isClean());
      quantParameterTensorNames_ = null;
    }
    return quantParameterTensorNamesBuilder_;
  }

  @java.lang.Override
  public final Builder setUnknownFields(
      final com.google.protobuf.UnknownFieldSet unknownFields) {
    return super.setUnknownFields(unknownFields);
  }

  @java.lang.Override
  public final Builder mergeUnknownFields(
      final com.google.protobuf.UnknownFieldSet unknownFields) {
    return super.mergeUnknownFields(unknownFields);
  }

  // @@protoc_insertion_point(builder_scope:onnx.TensorAnnotation)
}
// @@protoc_insertion_point(class_scope:onnx.TensorAnnotation)
// Singleton default instance: all fields at their proto default values.
private static final onnx.Onnx.TensorAnnotation DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new onnx.Onnx.TensorAnnotation();
}
public static onnx.Onnx.TensorAnnotation getDefaultInstance() {
return DEFAULT_INSTANCE;
}
// Wire-format parser for TensorAnnotation. Deprecated for direct use:
// prefer parser() / getParserForType(). Restored the <TensorAnnotation>
// type arguments that were stripped to raw types in the mangled copy.
@java.lang.Deprecated public static final com.google.protobuf.Parser<TensorAnnotation>
    PARSER = new com.google.protobuf.AbstractParser<TensorAnnotation>() {
  @java.lang.Override
  public TensorAnnotation parsePartialFrom(
      com.google.protobuf.CodedInputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    Builder builder = newBuilder();
    try {
      builder.mergeFrom(input, extensionRegistry);
    } catch (com.google.protobuf.InvalidProtocolBufferException e) {
      // Attach the partially parsed message so callers can inspect it.
      throw e.setUnfinishedMessage(builder.buildPartial());
    } catch (com.google.protobuf.UninitializedMessageException e) {
      throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
    } catch (java.io.IOException e) {
      throw new com.google.protobuf.InvalidProtocolBufferException(e)
          .setUnfinishedMessage(builder.buildPartial());
    }
    return builder.buildPartial();
  }
};
// Public accessor for the message parser (restored generic return type).
public static com.google.protobuf.Parser<TensorAnnotation> parser() {
  return PARSER;
}
// Per-instance parser accessor (restored generic return type).
@java.lang.Override
public com.google.protobuf.Parser<TensorAnnotation> getParserForType() {
  return PARSER;
}
// Returns the shared default instance of this message type.
@java.lang.Override
public onnx.Onnx.TensorAnnotation getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
public interface GraphProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:onnx.GraphProto)
com.google.protobuf.MessageOrBuilder {
/**
*
*
* optional string name = 2;
* @return Whether the name field is set.
*/
boolean hasName();
/**
*
* The name of the graph.
*
*
* optional string name = 2;
* @return The name.
*/
java.lang.String getName();
/**
*
* The name of the graph.
*
*
* optional string name = 2;
* @return The bytes for name.
*/
com.google.protobuf.ByteString
getNameBytes();
/**
*
* A list of named tensor values, used to specify constant inputs of the graph.
* Each initializer (both TensorProto as well SparseTensorProto) MUST have a name.
* The name MUST be unique across both initializer and sparse_initializer,
* but the name MAY also appear in the input list.
*
* A list of named tensor values, used to specify constant inputs of the graph.
* Each initializer (both TensorProto as well SparseTensorProto) MUST have a name.
* The name MUST be unique across both initializer and sparse_initializer,
* but the name MAY also appear in the input list.
*
* A list of named tensor values, used to specify constant inputs of the graph.
* Each initializer (both TensorProto as well SparseTensorProto) MUST have a name.
* The name MUST be unique across both initializer and sparse_initializer,
* but the name MAY also appear in the input list.
*
* A list of named tensor values, used to specify constant inputs of the graph.
* Each initializer (both TensorProto as well SparseTensorProto) MUST have a name.
* The name MUST be unique across both initializer and sparse_initializer,
* but the name MAY also appear in the input list.
*
* A list of named tensor values, used to specify constant inputs of the graph.
* Each initializer (both TensorProto as well SparseTensorProto) MUST have a name.
* The name MUST be unique across both initializer and sparse_initializer,
* but the name MAY also appear in the input list.
*
* This field carries information to indicate the mapping among a tensor and its
* quantization parameter tensors. For example:
* For tensor 'a', it may have {'SCALE_TENSOR', 'a_scale'} and {'ZERO_POINT_TENSOR', 'a_zero_point'} annotated,
* which means, tensor 'a_scale' and tensor 'a_zero_point' are scale and zero point of tensor 'a' in the model.
*
* This field carries information to indicate the mapping among a tensor and its
* quantization parameter tensors. For example:
* For tensor 'a', it may have {'SCALE_TENSOR', 'a_scale'} and {'ZERO_POINT_TENSOR', 'a_zero_point'} annotated,
* which means, tensor 'a_scale' and tensor 'a_zero_point' are scale and zero point of tensor 'a' in the model.
*
* This field carries information to indicate the mapping among a tensor and its
* quantization parameter tensors. For example:
* For tensor 'a', it may have {'SCALE_TENSOR', 'a_scale'} and {'ZERO_POINT_TENSOR', 'a_zero_point'} annotated,
* which means, tensor 'a_scale' and tensor 'a_zero_point' are scale and zero point of tensor 'a' in the model.
*
* This field carries information to indicate the mapping among a tensor and its
* quantization parameter tensors. For example:
* For tensor 'a', it may have {'SCALE_TENSOR', 'a_scale'} and {'ZERO_POINT_TENSOR', 'a_zero_point'} annotated,
* which means, tensor 'a_scale' and tensor 'a_zero_point' are scale and zero point of tensor 'a' in the model.
*
* This field carries information to indicate the mapping among a tensor and its
* quantization parameter tensors. For example:
* For tensor 'a', it may have {'SCALE_TENSOR', 'a_scale'} and {'ZERO_POINT_TENSOR', 'a_zero_point'} annotated,
* which means, tensor 'a_scale' and tensor 'a_zero_point' are scale and zero point of tensor 'a' in the model.
*
* Graphs
*
* A graph defines the computational logic of a model and is comprised of a parameterized
* list of nodes that form a directed acyclic graph based on their inputs and outputs.
* This is the equivalent of the "network" or "graph" in many deep learning
* frameworks.
*
*
* Protobuf type {@code onnx.GraphProto}
*/
public static final class GraphProto extends
com.google.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:onnx.GraphProto)
GraphProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use GraphProto.newBuilder() to construct.
// Use GraphProto.newBuilder() to construct.
// Restored the wildcard type parameter (stripped to a raw/broken
// "Builder>" in the mangled copy); this is the canonical protoc output.
private GraphProto(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
  super(builder);
}
// No-arg constructor for the default instance: strings default to empty
// and every repeated field to an immutable empty list.
private GraphProto() {
node_ = java.util.Collections.emptyList();
name_ = "";
initializer_ = java.util.Collections.emptyList();
sparseInitializer_ = java.util.Collections.emptyList();
docString_ = "";
input_ = java.util.Collections.emptyList();
output_ = java.util.Collections.emptyList();
valueInfo_ = java.util.Collections.emptyList();
quantizationAnnotation_ = java.util.Collections.emptyList();
}
// Called reflectively by the protobuf runtime to allocate instances;
// UnusedPrivateParameter only disambiguates this overload.
@java.lang.Override
@SuppressWarnings({"unused"})
protected java.lang.Object newInstance(
UnusedPrivateParameter unused) {
return new GraphProto();
}
// Returns the descriptor for the onnx.GraphProto message type.
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return onnx.Onnx.internal_static_onnx_GraphProto_descriptor;
}
// Wires the generated message/builder classes to the reflection table
// built in the outer Onnx class static initializer.
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return onnx.Onnx.internal_static_onnx_GraphProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
onnx.Onnx.GraphProto.class, onnx.Onnx.GraphProto.Builder.class);
}
// Presence bits for the optional scalar fields of this message.
private int bitField0_;
public static final int NODE_FIELD_NUMBER = 1;
@SuppressWarnings("serial")
// NOTE(review): element type restored; the extracted source carried a raw
// "java.util.List" with its <onnx.Onnx.NodeProto> parameter stripped.
private java.util.List<onnx.Onnx.NodeProto> node_;
/**
 * Returns a read view of the node at {@code index}.
 *
 * repeated .onnx.NodeProto node = 1;
 */
@java.lang.Override
public onnx.Onnx.NodeProtoOrBuilder getNodeOrBuilder(
int index) {
return node_.get(index);
}
public static final int NAME_FIELD_NUMBER = 2;
@SuppressWarnings("serial")
// Holds either a decoded java.lang.String or the raw ByteString; decoding is
// deferred until first access (standard protobuf lazy-string pattern).
private volatile java.lang.Object name_ = "";
/**
 *
 * The name of the graph.
 *
 *
 * optional string name = 2;
 * @return Whether the name field is set.
 */
@java.lang.Override
public boolean hasName() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
 *
 * The name of the graph.
 *
 *
 * optional string name = 2;
 * @return The name.
 */
@java.lang.Override
public java.lang.String getName() {
java.lang.Object ref = name_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
// Cache the decoded form only when the bytes are valid UTF-8, so invalid
// input keeps round-tripping as the original bytes.
if (bs.isValidUtf8()) {
name_ = s;
}
return s;
}
}
/**
 *
 * The name of the graph.
 *
 *
 * optional string name = 2;
 * @return The bytes for name.
 */
@java.lang.Override
public com.google.protobuf.ByteString
getNameBytes() {
java.lang.Object ref = name_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
// Cache the encoded form for subsequent calls.
name_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
public static final int INITIALIZER_FIELD_NUMBER = 5;
@SuppressWarnings("serial")
// NOTE(review): element type restored; the extracted source carried a raw
// "java.util.List" with its <onnx.Onnx.TensorProto> parameter stripped.
// The javadoc below was duplicated three times in the paste; deduplicated.
private java.util.List<onnx.Onnx.TensorProto> initializer_;
/**
 * A list of named tensor values, used to specify constant inputs of the graph.
 * Each initializer (both TensorProto as well SparseTensorProto) MUST have a name.
 * The name MUST be unique across both initializer and sparse_initializer,
 * but the name MAY also appear in the input list.
 *
 * repeated .onnx.TensorProto initializer = 5;
 */
@java.lang.Override
public int getInitializerCount() {
return initializer_.size();
}
/**
 * Returns a read view of the initializer at {@code index}.
 *
 * repeated .onnx.TensorProto initializer = 5;
 */
@java.lang.Override
public onnx.Onnx.TensorProtoOrBuilder getInitializerOrBuilder(
int index) {
return initializer_.get(index);
}
public static final int SPARSE_INITIALIZER_FIELD_NUMBER = 15;
@SuppressWarnings("serial")
/**
 * Initializers (see above) stored in sparse format.
 */
// NOTE(review): element type restored; the extracted source carried a raw
// "java.util.List" with its <onnx.Onnx.SparseTensorProto> parameter stripped.
private java.util.List<onnx.Onnx.SparseTensorProto> sparseInitializer_;
/**
 * Information for the values in the graph. The ValueInfoProto.name's
 * must be distinct. It is optional for a value to appear in value_info list.
 *
 * repeated .onnx.ValueInfoProto value_info = 13;
 */
// NOTE(review): the pasted javadoc here fused the sparse_initializer and
// value_info field comments; they have been split back onto their owners.
@java.lang.Override
public onnx.Onnx.ValueInfoProtoOrBuilder getValueInfoOrBuilder(
int index) {
return valueInfo_.get(index);
}
public static final int QUANTIZATION_ANNOTATION_FIELD_NUMBER = 14;
@SuppressWarnings("serial")
// NOTE(review): element type restored; the extracted source carried a raw
// "java.util.List" with its <onnx.Onnx.TensorAnnotation> parameter stripped.
// The javadoc below was duplicated several times in the paste; deduplicated.
private java.util.List<onnx.Onnx.TensorAnnotation> quantizationAnnotation_;
/**
 * This field carries information to indicate the mapping among a tensor and its
 * quantization parameter tensors. For example:
 * For tensor 'a', it may have {'SCALE_TENSOR', 'a_scale'} and {'ZERO_POINT_TENSOR', 'a_zero_point'} annotated,
 * which means, tensor 'a_scale' and tensor 'a_zero_point' are scale and zero point of tensor 'a' in the model.
 *
 * repeated .onnx.TensorAnnotation quantization_annotation = 14;
 */
@java.lang.Override
public int getQuantizationAnnotationCount() {
return quantizationAnnotation_.size();
}
/**
 * Returns a read view of the annotation at {@code index}.
 *
 * repeated .onnx.TensorAnnotation quantization_annotation = 14;
 */
@java.lang.Override
public onnx.Onnx.TensorAnnotationOrBuilder getQuantizationAnnotationOrBuilder(
int index) {
return quantizationAnnotation_.get(index);
}
// Memoized tri-state: -1 = not yet checked, 0 = false, 1 = true.
private byte memoizedIsInitialized = -1;
// GraphProto declares no required fields, so initialization always succeeds.
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
memoizedIsInitialized = 1;
return true;
}
// Serializes populated fields in ascending field-number order: node(1),
// name(2), initializer(5), doc_string(10), input(11), output(12),
// value_info(13), quantization_annotation(14), sparse_initializer(15),
// followed by any unknown fields preserved from parsing.
@java.lang.Override
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
for (int i = 0; i < node_.size(); i++) {
output.writeMessage(1, node_.get(i));
}
// Bit 0x00000001 of bitField0_ tracks presence of the optional name field.
if (((bitField0_ & 0x00000001) != 0)) {
com.google.protobuf.GeneratedMessageV3.writeString(output, 2, name_);
}
for (int i = 0; i < initializer_.size(); i++) {
output.writeMessage(5, initializer_.get(i));
}
// Bit 0x00000002 tracks presence of the optional doc_string field.
if (((bitField0_ & 0x00000002) != 0)) {
com.google.protobuf.GeneratedMessageV3.writeString(output, 10, docString_);
}
for (int i = 0; i < input_.size(); i++) {
output.writeMessage(11, input_.get(i));
}
for (int i = 0; i < output_.size(); i++) {
output.writeMessage(12, output_.get(i));
}
for (int i = 0; i < valueInfo_.size(); i++) {
output.writeMessage(13, valueInfo_.get(i));
}
for (int i = 0; i < quantizationAnnotation_.size(); i++) {
output.writeMessage(14, quantizationAnnotation_.get(i));
}
for (int i = 0; i < sparseInitializer_.size(); i++) {
output.writeMessage(15, sparseInitializer_.get(i));
}
getUnknownFields().writeTo(output);
}
// Computes the wire size of this message, mirroring writeTo() field by field.
// The result is memoized in memoizedSize (-1 means "not yet computed").
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
for (int i = 0; i < node_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, node_.get(i));
}
if (((bitField0_ & 0x00000001) != 0)) {
size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, name_);
}
for (int i = 0; i < initializer_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(5, initializer_.get(i));
}
if (((bitField0_ & 0x00000002) != 0)) {
size += com.google.protobuf.GeneratedMessageV3.computeStringSize(10, docString_);
}
for (int i = 0; i < input_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(11, input_.get(i));
}
for (int i = 0; i < output_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(12, output_.get(i));
}
for (int i = 0; i < valueInfo_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(13, valueInfo_.get(i));
}
for (int i = 0; i < quantizationAnnotation_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(14, quantizationAnnotation_.get(i));
}
for (int i = 0; i < sparseInitializer_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(15, sparseInitializer_.get(i));
}
size += getUnknownFields().getSerializedSize();
memoizedSize = size;
return size;
}
// Structural, field-by-field equality. Optional fields compare presence
// first, then value; unknown fields must also match.
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof onnx.Onnx.GraphProto)) {
return super.equals(obj);
}
onnx.Onnx.GraphProto other = (onnx.Onnx.GraphProto) obj;
if (!getNodeList()
.equals(other.getNodeList())) return false;
if (hasName() != other.hasName()) return false;
if (hasName()) {
if (!getName()
.equals(other.getName())) return false;
}
if (!getInitializerList()
.equals(other.getInitializerList())) return false;
if (!getSparseInitializerList()
.equals(other.getSparseInitializerList())) return false;
if (hasDocString() != other.hasDocString()) return false;
if (hasDocString()) {
if (!getDocString()
.equals(other.getDocString())) return false;
}
if (!getInputList()
.equals(other.getInputList())) return false;
if (!getOutputList()
.equals(other.getOutputList())) return false;
if (!getValueInfoList()
.equals(other.getValueInfoList())) return false;
if (!getQuantizationAnnotationList()
.equals(other.getQuantizationAnnotationList())) return false;
if (!getUnknownFields().equals(other.getUnknownFields())) return false;
return true;
}
// Hash mixes the descriptor with every populated field, consistent with
// equals(). Memoized; 0 is the "not yet computed" sentinel, so a genuine
// hash of 0 would simply be recomputed on each call (harmless).
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (getNodeCount() > 0) {
hash = (37 * hash) + NODE_FIELD_NUMBER;
hash = (53 * hash) + getNodeList().hashCode();
}
if (hasName()) {
hash = (37 * hash) + NAME_FIELD_NUMBER;
hash = (53 * hash) + getName().hashCode();
}
if (getInitializerCount() > 0) {
hash = (37 * hash) + INITIALIZER_FIELD_NUMBER;
hash = (53 * hash) + getInitializerList().hashCode();
}
if (getSparseInitializerCount() > 0) {
hash = (37 * hash) + SPARSE_INITIALIZER_FIELD_NUMBER;
hash = (53 * hash) + getSparseInitializerList().hashCode();
}
if (hasDocString()) {
hash = (37 * hash) + DOC_STRING_FIELD_NUMBER;
hash = (53 * hash) + getDocString().hashCode();
}
if (getInputCount() > 0) {
hash = (37 * hash) + INPUT_FIELD_NUMBER;
hash = (53 * hash) + getInputList().hashCode();
}
if (getOutputCount() > 0) {
hash = (37 * hash) + OUTPUT_FIELD_NUMBER;
hash = (53 * hash) + getOutputList().hashCode();
}
if (getValueInfoCount() > 0) {
hash = (37 * hash) + VALUE_INFO_FIELD_NUMBER;
hash = (53 * hash) + getValueInfoList().hashCode();
}
if (getQuantizationAnnotationCount() > 0) {
hash = (37 * hash) + QUANTIZATION_ANNOTATION_FIELD_NUMBER;
hash = (53 * hash) + getQuantizationAnnotationList().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
// Standard protoc-generated parse entry points; all delegate to PARSER.
// Stream overloads route protobuf exceptions through parseWithIOException so
// callers see java.io.IOException; the "Delimited" variants read a varint
// length prefix before the message payload.
public static onnx.Onnx.GraphProto parseFrom(
java.nio.ByteBuffer data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static onnx.Onnx.GraphProto parseFrom(
java.nio.ByteBuffer data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static onnx.Onnx.GraphProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static onnx.Onnx.GraphProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static onnx.Onnx.GraphProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static onnx.Onnx.GraphProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static onnx.Onnx.GraphProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static onnx.Onnx.GraphProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static onnx.Onnx.GraphProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static onnx.Onnx.GraphProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static onnx.Onnx.GraphProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static onnx.Onnx.GraphProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
// Builder factory methods.
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(onnx.Onnx.GraphProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
// Skips the mergeFrom when this is already the all-default instance.
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
 * The name of the graph.
 *
 * optional string name = 2;
 * @return Whether the name field is set.
 */
// NOTE(review): the original javadoc on this builder method was clobbered by
// the class-level "Graphs" comment during extraction; restored to match the
// sibling name accessors. In the Builder, presence of name is tracked by bit
// 0x00000002 of the Builder's own bitField0_ (a different layout from the
// message class, where name uses bit 0x00000001).
public boolean hasName() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
 *
 * The name of the graph.
 *
 *
 * optional string name = 2;
 * @return The name.
 */
public java.lang.String getName() {
java.lang.Object ref = name_;
// name_ lazily holds either a String or a ByteString; the decoded String is
// cached only when the bytes are valid UTF-8.
if (!(ref instanceof java.lang.String)) {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
name_ = s;
}
return s;
} else {
return (java.lang.String) ref;
}
}
/**
 *
 * The name of the graph.
 *
 *
 * optional string name = 2;
 * @return The bytes for name.
 */
public com.google.protobuf.ByteString
getNameBytes() {
java.lang.Object ref = name_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
// Cache the encoded form for subsequent calls.
name_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
 *
 * The name of the graph.
 *
 *
 * optional string name = 2;
 * @param value The name to set.
 * @return This builder for chaining.
 */
public Builder setName(
java.lang.String value) {
if (value == null) { throw new NullPointerException(); }
name_ = value;
bitField0_ |= 0x00000002;
onChanged();
return this;
}
/**
 *
 * The name of the graph.
 *
 *
 * optional string name = 2;
 * @return This builder for chaining.
 */
public Builder clearName() {
// Revert to the default instance's value and drop the presence bit.
name_ = getDefaultInstance().getName();
bitField0_ = (bitField0_ & ~0x00000002);
onChanged();
return this;
}
/**
 *
 * The name of the graph.
 *
 *
 * optional string name = 2;
 * @param value The bytes for name to set.
 * @return This builder for chaining.
 */
public Builder setNameBytes(
com.google.protobuf.ByteString value) {
if (value == null) { throw new NullPointerException(); }
name_ = value;
bitField0_ |= 0x00000002;
onChanged();
return this;
}
// NOTE(review): generic type arguments restored below; the extracted source
// carried raw "java.util.List" / "java.util.ArrayList" with their
// <onnx.Onnx.TensorProto> parameters stripped.
private java.util.List<onnx.Onnx.TensorProto> initializer_ =
java.util.Collections.emptyList();
// Copy-on-first-write: replaces the shared immutable list with a private
// ArrayList; bit 0x00000004 of the Builder's bitField0_ records mutability.
private void ensureInitializerIsMutable() {
if (!((bitField0_ & 0x00000004) != 0)) {
initializer_ = new java.util.ArrayList<onnx.Onnx.TensorProto>(initializer_);
bitField0_ |= 0x00000004;
}
}
// Nested-builder machinery; non-null once getInitializerFieldBuilder() runs,
// after which initializer_ is no longer the source of truth.
private com.google.protobuf.RepeatedFieldBuilderV3<
onnx.Onnx.TensorProto, onnx.Onnx.TensorProto.Builder, onnx.Onnx.TensorProtoOrBuilder> initializerBuilder_;
/**
 * A list of named tensor values, used to specify constant inputs of the graph.
 * Each initializer (both TensorProto as well SparseTensorProto) MUST have a name.
 * The name MUST be unique across both initializer and sparse_initializer,
 * but the name MAY also appear in the input list.
 *
 * repeated .onnx.TensorProto initializer = 5;
 */
public int getInitializerCount() {
if (initializerBuilder_ == null) {
return initializer_.size();
} else {
return initializerBuilder_.getCount();
}
}
/**
 * Replaces the initializer at {@code index}.
 *
 * repeated .onnx.TensorProto initializer = 5;
 */
public Builder setInitializer(
int index, onnx.Onnx.TensorProto value) {
if (initializerBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureInitializerIsMutable();
initializer_.set(index, value);
onChanged();
} else {
initializerBuilder_.setMessage(index, value);
}
return this;
}
/**
 * Appends an initializer.
 *
 * repeated .onnx.TensorProto initializer = 5;
 */
public Builder addInitializer(onnx.Onnx.TensorProto value) {
if (initializerBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureInitializerIsMutable();
initializer_.add(value);
onChanged();
} else {
initializerBuilder_.addMessage(value);
}
return this;
}
/**
 * Inserts an initializer at {@code index}, shifting later elements.
 *
 * repeated .onnx.TensorProto initializer = 5;
 */
public Builder addInitializer(
int index, onnx.Onnx.TensorProto value) {
if (initializerBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureInitializerIsMutable();
initializer_.add(index, value);
onChanged();
} else {
initializerBuilder_.addMessage(index, value);
}
return this;
}
/**
 * Returns a mutable sub-builder for the initializer at {@code index}.
 * Forces creation of the nested field builder.
 *
 * repeated .onnx.TensorProto initializer = 5;
 */
public onnx.Onnx.TensorProto.Builder getInitializerBuilder(
int index) {
return getInitializerFieldBuilder().getBuilder(index);
}
/**
 * Returns a read view of the initializer at {@code index} without forcing
 * creation of the nested field builder.
 *
 * repeated .onnx.TensorProto initializer = 5;
 */
public onnx.Onnx.TensorProtoOrBuilder getInitializerOrBuilder(
int index) {
if (initializerBuilder_ == null) {
return initializer_.get(index); } else {
return initializerBuilder_.getMessageOrBuilder(index);
}
}
/**
 * This field carries information to indicate the mapping among a tensor and its
 * quantization parameter tensors. For example:
 * For tensor 'a', it may have {'SCALE_TENSOR', 'a_scale'} and {'ZERO_POINT_TENSOR', 'a_zero_point'} annotated,
 * which means, tensor 'a_scale' and tensor 'a_zero_point' are scale and zero point of tensor 'a' in the model.
 *
 * repeated .onnx.TensorAnnotation quantization_annotation = 14;
 */
public int getQuantizationAnnotationCount() {
if (quantizationAnnotationBuilder_ == null) {
return quantizationAnnotation_.size();
} else {
return quantizationAnnotationBuilder_.getCount();
}
}
/**
 * Replaces the quantization annotation at {@code index}.
 *
 * repeated .onnx.TensorAnnotation quantization_annotation = 14;
 */
public Builder setQuantizationAnnotation(
int index, onnx.Onnx.TensorAnnotation value) {
if (quantizationAnnotationBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureQuantizationAnnotationIsMutable();
quantizationAnnotation_.set(index, value);
onChanged();
} else {
quantizationAnnotationBuilder_.setMessage(index, value);
}
return this;
}
/**
 * Appends a quantization annotation (tensor-to-scale/zero-point mapping; see
 * the field doc: repeated .onnx.TensorAnnotation quantization_annotation = 14).
 */
public Builder addQuantizationAnnotation(onnx.Onnx.TensorAnnotation value) {
if (quantizationAnnotationBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureQuantizationAnnotationIsMutable();
quantizationAnnotation_.add(value);
onChanged();
} else {
quantizationAnnotationBuilder_.addMessage(value);
}
return this;
}
/**
 * Inserts a quantization annotation at {@code index}, shifting later elements.
 *
 * repeated .onnx.TensorAnnotation quantization_annotation = 14;
 */
public Builder addQuantizationAnnotation(
int index, onnx.Onnx.TensorAnnotation value) {
if (quantizationAnnotationBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureQuantizationAnnotationIsMutable();
quantizationAnnotation_.add(index, value);
onChanged();
} else {
quantizationAnnotationBuilder_.addMessage(index, value);
}
return this;
}
/**
 * Returns a mutable sub-builder for the annotation at {@code index}.
 * Forces creation of the nested field builder.
 *
 * repeated .onnx.TensorAnnotation quantization_annotation = 14;
 */
public onnx.Onnx.TensorAnnotation.Builder getQuantizationAnnotationBuilder(
int index) {
return getQuantizationAnnotationFieldBuilder().getBuilder(index);
}
/**
 * Returns a read view of the annotation at {@code index} without forcing
 * creation of the nested field builder.
 *
 * repeated .onnx.TensorAnnotation quantization_annotation = 14;
 */
public onnx.Onnx.TensorAnnotationOrBuilder getQuantizationAnnotationOrBuilder(
int index) {
if (quantizationAnnotationBuilder_ == null) {
return quantizationAnnotation_.get(index); } else {
return quantizationAnnotationBuilder_.getMessageOrBuilder(index);
}
}
/**
 * Returns the list of mutable sub-builders for every quantization annotation.
 * Forces creation of the nested field builder.
 *
 * repeated .onnx.TensorAnnotation quantization_annotation = 14;
 */
// NOTE(review): return type restored; the extracted source carried a raw
// "java.util.List" with its <onnx.Onnx.TensorAnnotation.Builder> parameter
// stripped.
public java.util.List<onnx.Onnx.TensorAnnotation.Builder>
getQuantizationAnnotationBuilderList() {
return getQuantizationAnnotationFieldBuilder().getBuilderList();
}
// Lazily creates the repeated-field builder for quantization_annotation.
// Once created, the builder takes ownership of the element list, so the
// plain list reference is deliberately cleared afterwards.
private com.google.protobuf.RepeatedFieldBuilderV3<
onnx.Onnx.TensorAnnotation, onnx.Onnx.TensorAnnotation.Builder, onnx.Onnx.TensorAnnotationOrBuilder>
getQuantizationAnnotationFieldBuilder() {
if (quantizationAnnotationBuilder_ == null) {
quantizationAnnotationBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3<
onnx.Onnx.TensorAnnotation, onnx.Onnx.TensorAnnotation.Builder, onnx.Onnx.TensorAnnotationOrBuilder>(
quantizationAnnotation_,
// "isMutable" flag: true only when this builder already owns the list.
((bitField0_ & 0x00000100) != 0),
getParentForChildren(),
isClean());
// Builder now owns the elements; drop our direct reference.
quantizationAnnotation_ = null;
}
return quantizationAnnotationBuilder_;
}
// Replaces the set of fields unknown to this message's schema.
@java.lang.Override
public final Builder setUnknownFields(
final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
// Merges additional unknown fields into the existing set.
@java.lang.Override
public final Builder mergeUnknownFields(
final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:onnx.GraphProto)
}
// @@protoc_insertion_point(class_scope:onnx.GraphProto)
// Singleton default (all-fields-unset) instance of GraphProto.
private static final onnx.Onnx.GraphProto DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new onnx.Onnx.GraphProto();
}
// Returns the shared immutable default instance.
public static onnx.Onnx.GraphProto getDefaultInstance() {
return DEFAULT_INSTANCE;
}
// Wire-format parser for GraphProto. The generic type parameters were lost
// in a bad text conversion; restored here ("Parser<GraphProto>").
@java.lang.Deprecated public static final com.google.protobuf.Parser<GraphProto>
    PARSER = new com.google.protobuf.AbstractParser<GraphProto>() {
  @java.lang.Override
  public GraphProto parsePartialFrom(
      com.google.protobuf.CodedInputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    // Parse into a fresh builder; on any failure, attach the partially
    // parsed message so callers can inspect what was read before the error.
    Builder builder = newBuilder();
    try {
      builder.mergeFrom(input, extensionRegistry);
    } catch (com.google.protobuf.InvalidProtocolBufferException e) {
      throw e.setUnfinishedMessage(builder.buildPartial());
    } catch (com.google.protobuf.UninitializedMessageException e) {
      throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
    } catch (java.io.IOException e) {
      throw new com.google.protobuf.InvalidProtocolBufferException(e)
          .setUnfinishedMessage(builder.buildPartial());
    }
    return builder.buildPartial();
  }
};
// Static accessor for the parser (generic parameter <GraphProto> restored;
// it was stripped by a bad text conversion, leaving a raw type).
public static com.google.protobuf.Parser<GraphProto> parser() {
  return PARSER;
}
@java.lang.Override
public com.google.protobuf.Parser<GraphProto> getParserForType() {
  return PARSER;
}
@java.lang.Override
public onnx.Onnx.GraphProto getDefaultInstanceForType() {
  return DEFAULT_INSTANCE;
}
}
public interface TensorProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:onnx.TensorProto)
com.google.protobuf.MessageOrBuilder {
/**
*
* The shape of the tensor.
*
*
* repeated int64 dims = 1;
* @return A list containing the dims.
*/
java.util.List getDimsList();
/**
*
* The shape of the tensor.
*
*
* repeated int64 dims = 1;
* @return The count of dims.
*/
int getDimsCount();
/**
*
* The shape of the tensor.
*
*
* repeated int64 dims = 1;
* @param index The index of the element to return.
* @return The dims at the given index.
*/
long getDims(int index);
/**
*
* The data type of the tensor.
* This field MUST have a valid TensorProto.DataType value
*
*
* optional int32 data_type = 2;
* @return Whether the dataType field is set.
*/
boolean hasDataType();
/**
*
* The data type of the tensor.
* This field MUST have a valid TensorProto.DataType value
*
*
* optional int32 data_type = 2;
* @return The dataType.
*/
int getDataType();
/**
* optional .onnx.TensorProto.Segment segment = 3;
* @return Whether the segment field is set.
*/
boolean hasSegment();
/**
* optional .onnx.TensorProto.Segment segment = 3;
* @return The segment.
*/
onnx.Onnx.TensorProto.Segment getSegment();
/**
* optional .onnx.TensorProto.Segment segment = 3;
*/
onnx.Onnx.TensorProto.SegmentOrBuilder getSegmentOrBuilder();
/**
*
* For float and complex64 values
* Complex64 tensors are encoded as a single array of floats,
* with the real components appearing in odd numbered positions,
* and the corresponding imaginary component appearing in the
* subsequent even numbered position. (e.g., [1.0 + 2.0i, 3.0 + 4.0i]
* is encoded as [1.0, 2.0 ,3.0 ,4.0]
* When this field is present, the data_type field MUST be FLOAT or COMPLEX64.
*
*
* repeated float float_data = 4 [packed = true];
* @return A list containing the floatData.
*/
java.util.List getFloatDataList();
/**
*
* For float and complex64 values
* Complex64 tensors are encoded as a single array of floats,
* with the real components appearing in odd numbered positions,
* and the corresponding imaginary component appearing in the
* subsequent even numbered position. (e.g., [1.0 + 2.0i, 3.0 + 4.0i]
* is encoded as [1.0, 2.0 ,3.0 ,4.0]
* When this field is present, the data_type field MUST be FLOAT or COMPLEX64.
*
*
* repeated float float_data = 4 [packed = true];
* @return The count of floatData.
*/
int getFloatDataCount();
/**
*
* For float and complex64 values
* Complex64 tensors are encoded as a single array of floats,
* with the real components appearing in odd numbered positions,
* and the corresponding imaginary component appearing in the
* subsequent even numbered position. (e.g., [1.0 + 2.0i, 3.0 + 4.0i]
* is encoded as [1.0, 2.0 ,3.0 ,4.0]
* When this field is present, the data_type field MUST be FLOAT or COMPLEX64.
*
*
* repeated float float_data = 4 [packed = true];
* @param index The index of the element to return.
* @return The floatData at the given index.
*/
float getFloatData(int index);
/**
*
* For int32, uint8, int8, uint16, int16, bool, float8, and float16 values
* float16 and float8 values must be bit-wise converted to an uint16_t prior
* to writing to the buffer.
* When this field is present, the data_type field MUST be
* INT32, INT16, INT8, UINT16, UINT8, BOOL, FLOAT16, BFLOAT16, FLOAT8E4M3FN, FLOAT8E4M3FNUZ, FLOAT8E5M2, FLOAT8E5M2FNUZ
*
*
* repeated int32 int32_data = 5 [packed = true];
* @return A list containing the int32Data.
*/
java.util.List getInt32DataList();
/**
*
* For int32, uint8, int8, uint16, int16, bool, float8, and float16 values
* float16 and float8 values must be bit-wise converted to an uint16_t prior
* to writing to the buffer.
* When this field is present, the data_type field MUST be
* INT32, INT16, INT8, UINT16, UINT8, BOOL, FLOAT16, BFLOAT16, FLOAT8E4M3FN, FLOAT8E4M3FNUZ, FLOAT8E5M2, FLOAT8E5M2FNUZ
*
*
* repeated int32 int32_data = 5 [packed = true];
* @return The count of int32Data.
*/
int getInt32DataCount();
/**
*
* For int32, uint8, int8, uint16, int16, bool, float8, and float16 values
* float16 and float8 values must be bit-wise converted to an uint16_t prior
* to writing to the buffer.
* When this field is present, the data_type field MUST be
* INT32, INT16, INT8, UINT16, UINT8, BOOL, FLOAT16, BFLOAT16, FLOAT8E4M3FN, FLOAT8E4M3FNUZ, FLOAT8E5M2, FLOAT8E5M2FNUZ
*
*
* repeated int32 int32_data = 5 [packed = true];
* @param index The index of the element to return.
* @return The int32Data at the given index.
*/
int getInt32Data(int index);
/**
*
* For strings.
* Each element of string_data is a UTF-8 encoded Unicode
* string. No trailing null, no leading BOM. The protobuf "string"
* scalar type is not used to match ML community conventions.
* When this field is present, the data_type field MUST be STRING
*
*
* repeated bytes string_data = 6;
* @return A list containing the stringData.
*/
java.util.List getStringDataList();
/**
*
* For strings.
* Each element of string_data is a UTF-8 encoded Unicode
* string. No trailing null, no leading BOM. The protobuf "string"
* scalar type is not used to match ML community conventions.
* When this field is present, the data_type field MUST be STRING
*
*
* repeated bytes string_data = 6;
* @return The count of stringData.
*/
int getStringDataCount();
/**
*
* For strings.
* Each element of string_data is a UTF-8 encoded Unicode
* string. No trailing null, no leading BOM. The protobuf "string"
* scalar type is not used to match ML community conventions.
* When this field is present, the data_type field MUST be STRING
*
*
* repeated bytes string_data = 6;
* @param index The index of the element to return.
* @return The stringData at the given index.
*/
com.google.protobuf.ByteString getStringData(int index);
/**
*
* For int64.
* When this field is present, the data_type field MUST be INT64
*
*
* repeated int64 int64_data = 7 [packed = true];
* @return A list containing the int64Data.
*/
java.util.List getInt64DataList();
/**
*
* For int64.
* When this field is present, the data_type field MUST be INT64
*
*
* repeated int64 int64_data = 7 [packed = true];
* @return The count of int64Data.
*/
int getInt64DataCount();
/**
*
* For int64.
* When this field is present, the data_type field MUST be INT64
*
*
* repeated int64 int64_data = 7 [packed = true];
* @param index The index of the element to return.
* @return The int64Data at the given index.
*/
long getInt64Data(int index);
/**
*
* Optionally, a name for the tensor.
*
*
* optional string name = 8;
* @return Whether the name field is set.
*/
boolean hasName();
/**
*
* Optionally, a name for the tensor.
*
*
* optional string name = 8;
* @return The name.
*/
java.lang.String getName();
/**
*
* Optionally, a name for the tensor.
*
*
* optional string name = 8;
* @return The bytes for name.
*/
com.google.protobuf.ByteString
getNameBytes();
/**
*
* A human-readable documentation for this tensor. Markdown is allowed.
*
*
* optional string doc_string = 12;
* @return Whether the docString field is set.
*/
boolean hasDocString();
/**
*
* A human-readable documentation for this tensor. Markdown is allowed.
*
* A human-readable documentation for this tensor. Markdown is allowed.
*
*
* optional string doc_string = 12;
* @return The bytes for docString.
*/
com.google.protobuf.ByteString
getDocStringBytes();
/**
*
* Serializations can either use one of the fields above, or use this
* raw bytes field. The only exception is the string case, where one is
* required to store the content in the repeated bytes string_data field.
*
* When this raw_data field is used to store tensor value, elements MUST
* be stored in as fixed-width, little-endian order.
* Floating-point data types MUST be stored in IEEE 754 format.
* Complex64 elements must be written as two consecutive FLOAT values, real component first.
* Complex128 elements must be written as two consecutive DOUBLE values, real component first.
* Boolean type MUST be written one byte per tensor element (00000001 for true, 00000000 for false).
*
* Note: the advantage of specific field rather than the raw_data field is
* that in some cases (e.g. int data), protobuf does a better packing via
* variable length storage, and may lead to smaller binary footprint.
* When this field is present, the data_type field MUST NOT be STRING or UNDEFINED
*
*
* optional bytes raw_data = 9;
* @return Whether the rawData field is set.
*/
boolean hasRawData();
/**
*
* Serializations can either use one of the fields above, or use this
* raw bytes field. The only exception is the string case, where one is
* required to store the content in the repeated bytes string_data field.
*
* When this raw_data field is used to store tensor value, elements MUST
* be stored in as fixed-width, little-endian order.
* Floating-point data types MUST be stored in IEEE 754 format.
* Complex64 elements must be written as two consecutive FLOAT values, real component first.
* Complex128 elements must be written as two consecutive DOUBLE values, real component first.
* Boolean type MUST be written one byte per tensor element (00000001 for true, 00000000 for false).
*
* Note: the advantage of specific field rather than the raw_data field is
* that in some cases (e.g. int data), protobuf does a better packing via
* variable length storage, and may lead to smaller binary footprint.
* When this field is present, the data_type field MUST NOT be STRING or UNDEFINED
*
* Data can be stored inside the protobuf file using type-specific fields or raw_data.
* Alternatively, raw bytes data can be stored in an external file, using the external_data field.
* external_data stores key-value pairs describing data location. Recognized keys are:
* - "location" (required) - POSIX filesystem path relative to the directory where the ONNX
* protobuf model was stored
* - "offset" (optional) - position of byte at which stored data begins. Integer stored as string.
* Offset values SHOULD be multiples 4096 (page size) to enable mmap support.
* - "length" (optional) - number of bytes containing data. Integer stored as string.
* - "checksum" (optional) - SHA1 digest of file specified in under 'location' key.
*
* Data can be stored inside the protobuf file using type-specific fields or raw_data.
* Alternatively, raw bytes data can be stored in an external file, using the external_data field.
* external_data stores key-value pairs describing data location. Recognized keys are:
* - "location" (required) - POSIX filesystem path relative to the directory where the ONNX
* protobuf model was stored
* - "offset" (optional) - position of byte at which stored data begins. Integer stored as string.
* Offset values SHOULD be multiples 4096 (page size) to enable mmap support.
* - "length" (optional) - number of bytes containing data. Integer stored as string.
* - "checksum" (optional) - SHA1 digest of file specified in under 'location' key.
*
* Data can be stored inside the protobuf file using type-specific fields or raw_data.
* Alternatively, raw bytes data can be stored in an external file, using the external_data field.
* external_data stores key-value pairs describing data location. Recognized keys are:
* - "location" (required) - POSIX filesystem path relative to the directory where the ONNX
* protobuf model was stored
* - "offset" (optional) - position of byte at which stored data begins. Integer stored as string.
* Offset values SHOULD be multiples 4096 (page size) to enable mmap support.
* - "length" (optional) - number of bytes containing data. Integer stored as string.
* - "checksum" (optional) - SHA1 digest of file specified in under 'location' key.
*
* Data can be stored inside the protobuf file using type-specific fields or raw_data.
* Alternatively, raw bytes data can be stored in an external file, using the external_data field.
* external_data stores key-value pairs describing data location. Recognized keys are:
* - "location" (required) - POSIX filesystem path relative to the directory where the ONNX
* protobuf model was stored
* - "offset" (optional) - position of byte at which stored data begins. Integer stored as string.
* Offset values SHOULD be multiples 4096 (page size) to enable mmap support.
* - "length" (optional) - number of bytes containing data. Integer stored as string.
* - "checksum" (optional) - SHA1 digest of file specified in under 'location' key.
*
* Data can be stored inside the protobuf file using type-specific fields or raw_data.
* Alternatively, raw bytes data can be stored in an external file, using the external_data field.
* external_data stores key-value pairs describing data location. Recognized keys are:
* - "location" (required) - POSIX filesystem path relative to the directory where the ONNX
* protobuf model was stored
* - "offset" (optional) - position of byte at which stored data begins. Integer stored as string.
* Offset values SHOULD be multiples 4096 (page size) to enable mmap support.
* - "length" (optional) - number of bytes containing data. Integer stored as string.
* - "checksum" (optional) - SHA1 digest of file specified in under 'location' key.
*
* For double
* Complex128 tensors are encoded as a single array of doubles,
* with the real components appearing in odd numbered positions,
* and the corresponding imaginary component appearing in the
* subsequent even numbered position. (e.g., [1.0 + 2.0i, 3.0 + 4.0i]
* is encoded as [1.0, 2.0 ,3.0 ,4.0]
* When this field is present, the data_type field MUST be DOUBLE or COMPLEX128
*
*
* repeated double double_data = 10 [packed = true];
* @return A list containing the doubleData.
*/
java.util.List getDoubleDataList();
/**
*
* For double
* Complex128 tensors are encoded as a single array of doubles,
* with the real components appearing in odd numbered positions,
* and the corresponding imaginary component appearing in the
* subsequent even numbered position. (e.g., [1.0 + 2.0i, 3.0 + 4.0i]
* is encoded as [1.0, 2.0 ,3.0 ,4.0]
* When this field is present, the data_type field MUST be DOUBLE or COMPLEX128
*
*
* repeated double double_data = 10 [packed = true];
* @return The count of doubleData.
*/
int getDoubleDataCount();
/**
*
* For double
* Complex128 tensors are encoded as a single array of doubles,
* with the real components appearing in odd numbered positions,
* and the corresponding imaginary component appearing in the
* subsequent even numbered position. (e.g., [1.0 + 2.0i, 3.0 + 4.0i]
* is encoded as [1.0, 2.0 ,3.0 ,4.0]
* When this field is present, the data_type field MUST be DOUBLE or COMPLEX128
*
*
* repeated double double_data = 10 [packed = true];
* @param index The index of the element to return.
* @return The doubleData at the given index.
*/
double getDoubleData(int index);
/**
*
* For uint64 and uint32 values
* When this field is present, the data_type field MUST be
* UINT32 or UINT64
*
*
* repeated uint64 uint64_data = 11 [packed = true];
* @return A list containing the uint64Data.
*/
java.util.List getUint64DataList();
/**
*
* For uint64 and uint32 values
* When this field is present, the data_type field MUST be
* UINT32 or UINT64
*
*
* repeated uint64 uint64_data = 11 [packed = true];
* @return The count of uint64Data.
*/
int getUint64DataCount();
/**
*
* For uint64 and uint32 values
* When this field is present, the data_type field MUST be
* UINT32 or UINT64
*
*
* repeated uint64 uint64_data = 11 [packed = true];
* @param index The index of the element to return.
* @return The uint64Data at the given index.
*/
long getUint64Data(int index);
}
/**
*
* complex with float32 real and imaginary components
*
*
* COMPLEX64 = 14;
*/
COMPLEX64(14),
/**
*
* complex with float64 real and imaginary components
*
*
* COMPLEX128 = 15;
*/
COMPLEX128(15),
/**
*
* Non-IEEE floating-point format based on IEEE754 single-precision
* floating-point number truncated to 16 bits.
* This format has 1 sign bit, 8 exponent bits, and 7 mantissa bits.
*
*
* BFLOAT16 = 16;
*/
BFLOAT16(16),
/**
*
* Non-IEEE floating-point format based on papers
* FP8 Formats for Deep Learning, https://arxiv.org/abs/2209.05433,
* 8-bit Numerical Formats For Deep Neural Networks, https://arxiv.org/pdf/2206.02915.pdf.
* Operators supported FP8 are Cast, CastLike, QuantizeLinear, DequantizeLinear.
* The computation usually happens inside a block quantize / dequantize
* fused by the runtime.
*
*
* FLOAT8E4M3FN = 17;
*/
FLOAT8E4M3FN(17),
/**
 *
 * float 8, mostly used for coefficients, supports nan, not inf, no negative zero
 *
 *
 * FLOAT8E4M3FNUZ = 18;
 */
FLOAT8E4M3FNUZ(18),
/**
 *
 * follows IEEE 754, supports nan, inf, mostly used for gradients
 *
 *
 * FLOAT8E5M2 = 19;
 */
FLOAT8E5M2(19),
/**
*
* follows IEEE 754, supports nan, inf, mostly used for gradients, no negative zero
*
*
* FLOAT8E5M2FNUZ = 20;
*/
FLOAT8E5M2FNUZ(20),
;
/**
* UNDEFINED = 0;
*/
public static final int UNDEFINED_VALUE = 0;
/**
*
* Basic types.
*
*
* FLOAT = 1;
*/
public static final int FLOAT_VALUE = 1;
/**
*
* uint8_t
*
*
* UINT8 = 2;
*/
public static final int UINT8_VALUE = 2;
/**
*
* int8_t
*
*
* INT8 = 3;
*/
public static final int INT8_VALUE = 3;
/**
*
* uint16_t
*
*
* UINT16 = 4;
*/
public static final int UINT16_VALUE = 4;
/**
*
* int16_t
*
*
* INT16 = 5;
*/
public static final int INT16_VALUE = 5;
/**
*
* int32_t
*
*
* INT32 = 6;
*/
public static final int INT32_VALUE = 6;
/**
*
* int64_t
*
*
* INT64 = 7;
*/
public static final int INT64_VALUE = 7;
/**
*
* string
*
*
* STRING = 8;
*/
public static final int STRING_VALUE = 8;
/**
*
* bool
*
*
* BOOL = 9;
*/
public static final int BOOL_VALUE = 9;
/**
*
* IEEE754 half-precision floating-point format (16 bits wide).
* This format has 1 sign bit, 5 exponent bits, and 10 mantissa bits.
*
*
* FLOAT16 = 10;
*/
public static final int FLOAT16_VALUE = 10;
/**
* DOUBLE = 11;
*/
public static final int DOUBLE_VALUE = 11;
/**
* UINT32 = 12;
*/
public static final int UINT32_VALUE = 12;
/**
* UINT64 = 13;
*/
public static final int UINT64_VALUE = 13;
/**
*
* complex with float32 real and imaginary components
*
*
* COMPLEX64 = 14;
*/
public static final int COMPLEX64_VALUE = 14;
/**
*
* complex with float64 real and imaginary components
*
*
* COMPLEX128 = 15;
*/
public static final int COMPLEX128_VALUE = 15;
/**
*
* Non-IEEE floating-point format based on IEEE754 single-precision
* floating-point number truncated to 16 bits.
* This format has 1 sign bit, 8 exponent bits, and 7 mantissa bits.
*
*
* BFLOAT16 = 16;
*/
public static final int BFLOAT16_VALUE = 16;
/**
*
* Non-IEEE floating-point format based on papers
* FP8 Formats for Deep Learning, https://arxiv.org/abs/2209.05433,
* 8-bit Numerical Formats For Deep Neural Networks, https://arxiv.org/pdf/2206.02915.pdf.
* Operators supported FP8 are Cast, CastLike, QuantizeLinear, DequantizeLinear.
* The computation usually happens inside a block quantize / dequantize
* fused by the runtime.
*
*
* FLOAT8E4M3FN = 17;
*/
public static final int FLOAT8E4M3FN_VALUE = 17;
/**
*
* float 8, mostly used for coefficients, supports nan, not inf, no negative zero
*
*
* FLOAT8E4M3FNUZ = 18;
*/
public static final int FLOAT8E4M3FNUZ_VALUE = 18;
/**
*
* follows IEEE 754, supports nan, inf, mostly used for gradients
*
*
* FLOAT8E5M2 = 19;
*/
public static final int FLOAT8E5M2_VALUE = 19;
/**
*
* follows IEEE 754, supports nan, inf, mostly used for gradients, no negative zero
*
*
* FLOAT8E5M2FNUZ = 20;
*/
public static final int FLOAT8E5M2FNUZ_VALUE = 20;
// Returns the numeric wire value of this enum constant.
public final int getNumber() {
return value;
}
/**
 * @param value The numeric wire value of the corresponding enum entry.
 * @return The enum associated with the given numeric wire value.
 * @deprecated Use {@link #forNumber(int)} instead.
 */
@java.lang.Deprecated
public static DataType valueOf(int value) {
return forNumber(value);
}
/**
* @param value The numeric wire value of the corresponding enum entry.
* @return The enum associated with the given numeric wire value.
*/
// Maps a wire value to its DataType constant; returns null for values not
// known to this generated schema.
public static DataType forNumber(int value) {
switch (value) {
case 0: return UNDEFINED;
case 1: return FLOAT;
case 2: return UINT8;
case 3: return INT8;
case 4: return UINT16;
case 5: return INT16;
case 6: return INT32;
case 7: return INT64;
case 8: return STRING;
case 9: return BOOL;
case 10: return FLOAT16;
case 11: return DOUBLE;
case 12: return UINT32;
case 13: return UINT64;
case 14: return COMPLEX64;
case 15: return COMPLEX128;
case 16: return BFLOAT16;
case 17: return FLOAT8E4M3FN;
case 18: return FLOAT8E4M3FNUZ;
case 19: return FLOAT8E5M2;
case 20: return FLOAT8E5M2FNUZ;
default: return null;
}
}
// Lite-runtime lookup map from wire value to enum constant. The generic
// parameter <DataType> was stripped by a bad text conversion; restored.
public static com.google.protobuf.Internal.EnumLiteMap<DataType>
    internalGetValueMap() {
  return internalValueMap;
}
private static final com.google.protobuf.Internal.EnumLiteMap<
    DataType> internalValueMap =
        new com.google.protobuf.Internal.EnumLiteMap<DataType>() {
          public DataType findValueByNumber(int number) {
            return DataType.forNumber(number);
          }
        };
// Reflection descriptor for this constant; relies on constant order matching
// the descriptor's value order, as guaranteed by the code generator.
public final com.google.protobuf.Descriptors.EnumValueDescriptor
getValueDescriptor() {
return getDescriptor().getValues().get(ordinal());
}
public final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptorForType() {
return getDescriptor();
}
// DataType is the first nested enum type of TensorProto (index 0).
public static final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
return onnx.Onnx.TensorProto.getDescriptor().getEnumTypes().get(0);
}
private static final DataType[] VALUES = values();
// Reflection-based lookup: rejects descriptors belonging to other enum types.
public static DataType valueOf(
com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
if (desc.getType() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"EnumValueDescriptor is not for this type.");
}
return VALUES[desc.getIndex()];
}
// Numeric wire value backing this constant.
private final int value;
private DataType(int value) {
this.value = value;
}
// @@protoc_insertion_point(enum_scope:onnx.TensorProto.DataType)
}
/**
 * Location of the data for this tensor. MUST be one of:
 * - DEFAULT - data stored inside the protobuf message. Data is stored in raw_data (if set) otherwise in type-specified field.
 * - EXTERNAL - data stored in an external location as described by external_data field.
 *
 * Protobuf enum {@code onnx.TensorProto.DataLocation}
 */
public enum DataLocation
    implements com.google.protobuf.ProtocolMessageEnum {
  /**
   * DEFAULT = 0;
   */
  DEFAULT(0),
  /**
   * EXTERNAL = 1;
   */
  EXTERNAL(1),
  ;
  /**
   * DEFAULT = 0;
   */
  public static final int DEFAULT_VALUE = 0;
  /**
   * EXTERNAL = 1;
   */
  public static final int EXTERNAL_VALUE = 1;
  // Returns the numeric wire value of this enum constant.
  public final int getNumber() {
    return value;
  }
  /**
   * @param value The numeric wire value of the corresponding enum entry.
   * @return The enum associated with the given numeric wire value.
   * @deprecated Use {@link #forNumber(int)} instead.
   */
  @java.lang.Deprecated
  public static DataLocation valueOf(int value) {
    return forNumber(value);
  }
  /**
   * @param value The numeric wire value of the corresponding enum entry.
   * @return The enum associated with the given numeric wire value, or null if unknown.
   */
  public static DataLocation forNumber(int value) {
    switch (value) {
      case 0: return DEFAULT;
      case 1: return EXTERNAL;
      default: return null;
    }
  }
  // Generic parameter <DataLocation> restored; it was stripped by a bad
  // text conversion, leaving raw types.
  public static com.google.protobuf.Internal.EnumLiteMap<DataLocation>
      internalGetValueMap() {
    return internalValueMap;
  }
  private static final com.google.protobuf.Internal.EnumLiteMap<
      DataLocation> internalValueMap =
          new com.google.protobuf.Internal.EnumLiteMap<DataLocation>() {
            public DataLocation findValueByNumber(int number) {
              return DataLocation.forNumber(number);
            }
          };
  public final com.google.protobuf.Descriptors.EnumValueDescriptor
      getValueDescriptor() {
    return getDescriptor().getValues().get(ordinal());
  }
  public final com.google.protobuf.Descriptors.EnumDescriptor
      getDescriptorForType() {
    return getDescriptor();
  }
  // DataLocation is the second nested enum type of TensorProto (index 1).
  public static final com.google.protobuf.Descriptors.EnumDescriptor
      getDescriptor() {
    return onnx.Onnx.TensorProto.getDescriptor().getEnumTypes().get(1);
  }
  private static final DataLocation[] VALUES = values();
  public static DataLocation valueOf(
      com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
    if (desc.getType() != getDescriptor()) {
      throw new java.lang.IllegalArgumentException(
          "EnumValueDescriptor is not for this type.");
    }
    return VALUES[desc.getIndex()];
  }
  // Numeric wire value backing this constant.
  private final int value;
  private DataLocation(int value) {
    this.value = value;
  }
  // @@protoc_insertion_point(enum_scope:onnx.TensorProto.DataLocation)
}
// Read-only accessor interface for TensorProto.Segment (begin/end of a chunk).
public interface SegmentOrBuilder extends
// @@protoc_insertion_point(interface_extends:onnx.TensorProto.Segment)
com.google.protobuf.MessageOrBuilder {
/**
 * optional int64 begin = 1;
 * @return Whether the begin field is set.
 */
boolean hasBegin();
/**
 * optional int64 begin = 1;
 * @return The begin.
 */
long getBegin();
/**
 * optional int64 end = 2;
 * @return Whether the end field is set.
 */
boolean hasEnd();
/**
 * optional int64 end = 2;
 * @return The end.
 */
long getEnd();
}
/**
*
* For very large tensors, we may want to store them in chunks, in which
* case the following fields will specify the segment that is stored in
* the current TensorProto.
*
*
* Protobuf type {@code onnx.TensorProto.Segment}
*/
public static final class Segment extends
com.google.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:onnx.TensorProto.Segment)
SegmentOrBuilder {
private static final long serialVersionUID = 0L;
// Use Segment.newBuilder() to construct. The parameter's wildcard generic
// ("Builder<?>") was stripped by a bad text conversion, which left invalid
// Java ("Builder> builder"); restored here.
private Segment(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
  super(builder);
}
private Segment() {
}
// Used by the protobuf runtime to create instances reflectively.
@java.lang.Override
@SuppressWarnings({"unused"})
protected java.lang.Object newInstance(
    UnusedPrivateParameter unused) {
  return new Segment();
}
// Reflection descriptor for the Segment message type.
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return onnx.Onnx.internal_static_onnx_TensorProto_Segment_descriptor;
}
// Binds the generated field accessors to the message and builder classes.
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return onnx.Onnx.internal_static_onnx_TensorProto_Segment_fieldAccessorTable
.ensureFieldAccessorsInitialized(
onnx.Onnx.TensorProto.Segment.class, onnx.Onnx.TensorProto.Segment.Builder.class);
}
// Presence bits: bit 0 = begin, bit 1 = end.
private int bitField0_;
public static final int BEGIN_FIELD_NUMBER = 1;
private long begin_ = 0L;
/**
 * optional int64 begin = 1;
 * @return Whether the begin field is set.
 */
@java.lang.Override
public boolean hasBegin() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
 * optional int64 begin = 1;
 * @return The begin.
 */
@java.lang.Override
public long getBegin() {
return begin_;
}
public static final int END_FIELD_NUMBER = 2;
private long end_ = 0L;
/**
 * optional int64 end = 2;
 * @return Whether the end field is set.
 */
@java.lang.Override
public boolean hasEnd() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
 * optional int64 end = 2;
 * @return The end.
 */
@java.lang.Override
public long getEnd() {
return end_;
}
// Memoized initialization state: -1 unknown, 0 false, 1 true.
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
// Segment has no required fields, so it is always initialized.
memoizedIsInitialized = 1;
return true;
}
// Serializes only the fields whose presence bits are set, then any
// unknown fields, preserving field-number order.
@java.lang.Override
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (((bitField0_ & 0x00000001) != 0)) {
output.writeInt64(1, begin_);
}
if (((bitField0_ & 0x00000002) != 0)) {
output.writeInt64(2, end_);
}
getUnknownFields().writeTo(output);
}
// Computes (and memoizes) the serialized size; -1 in memoizedSize means
// "not yet computed".
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) != 0)) {
size += com.google.protobuf.CodedOutputStream
.computeInt64Size(1, begin_);
}
if (((bitField0_ & 0x00000002) != 0)) {
size += com.google.protobuf.CodedOutputStream
.computeInt64Size(2, end_);
}
size += getUnknownFields().getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
  // Identity short-circuit.
  if (obj == this) {
    return true;
  }
  // Non-Segment operands are delegated to the superclass comparison.
  if (!(obj instanceof onnx.Onnx.TensorProto.Segment)) {
    return super.equals(obj);
  }
  onnx.Onnx.TensorProto.Segment that = (onnx.Onnx.TensorProto.Segment) obj;
  // Presence bits must agree, and values must match whenever present.
  if (hasBegin() != that.hasBegin()
      || (hasBegin() && getBegin() != that.getBegin())) {
    return false;
  }
  if (hasEnd() != that.hasEnd()
      || (hasEnd() && getEnd() != that.getEnd())) {
    return false;
  }
  return getUnknownFields().equals(that.getUnknownFields());
}
@java.lang.Override
public int hashCode() {
  // Zero doubles as the "not yet computed" sentinel for the memoized hash.
  if (memoizedHashCode != 0) {
    return memoizedHashCode;
  }
  // Standard protobuf hash recipe: seed, descriptor, then each set field
  // mixed in as (37*h + fieldNumber) followed by (53*h + valueHash).
  int h = 41;
  h = (19 * h) + getDescriptor().hashCode();
  if (hasBegin()) {
    h = (37 * h) + BEGIN_FIELD_NUMBER;
    h = (53 * h) + com.google.protobuf.Internal.hashLong(getBegin());
  }
  if (hasEnd()) {
    h = (37 * h) + END_FIELD_NUMBER;
    h = (53 * h) + com.google.protobuf.Internal.hashLong(getEnd());
  }
  h = (29 * h) + getUnknownFields().hashCode();
  memoizedHashCode = h;
  return h;
}
// Standard generated parse entry points: in-memory sources delegate to
// PARSER directly; stream sources go through the GeneratedMessageV3
// helpers that translate IOException handling uniformly.
public static onnx.Onnx.TensorProto.Segment parseFrom(
java.nio.ByteBuffer data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static onnx.Onnx.TensorProto.Segment parseFrom(
java.nio.ByteBuffer data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static onnx.Onnx.TensorProto.Segment parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static onnx.Onnx.TensorProto.Segment parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static onnx.Onnx.TensorProto.Segment parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static onnx.Onnx.TensorProto.Segment parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static onnx.Onnx.TensorProto.Segment parseFrom(java.io.InputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static onnx.Onnx.TensorProto.Segment parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
// Delimited variants read a length-prefixed message from the stream.
public static onnx.Onnx.TensorProto.Segment parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static onnx.Onnx.TensorProto.Segment parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static onnx.Onnx.TensorProto.Segment parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static onnx.Onnx.TensorProto.Segment parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
// Builder factory methods. newBuilder() and toBuilder() go through the
// default instance; toBuilder() avoids a needless mergeFrom when called on
// the default instance itself.
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(onnx.Onnx.TensorProto.Segment prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
// Internal hook used by parent builders to create a child builder that
// reports changes back to its parent.
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
 * <pre>
 * For very large tensors, we may want to store them in chunks, in which
 * case the following fields will specify the segment that is stored in
 * the current TensorProto.
 * </pre>
 *
 * Protobuf type {@code onnx.TensorProto.Segment}
 */
public static final class Builder extends
    com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
    // @@protoc_insertion_point(builder_implements:onnx.TensorProto.Segment)
    onnx.Onnx.TensorProto.SegmentOrBuilder {
  public static final com.google.protobuf.Descriptors.Descriptor
      getDescriptor() {
    return onnx.Onnx.internal_static_onnx_TensorProto_Segment_descriptor;
  }

  @java.lang.Override
  protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
      internalGetFieldAccessorTable() {
    return onnx.Onnx.internal_static_onnx_TensorProto_Segment_fieldAccessorTable
        .ensureFieldAccessorsInitialized(
            onnx.Onnx.TensorProto.Segment.class, onnx.Onnx.TensorProto.Segment.Builder.class);
  }

  // Construct using onnx.Onnx.TensorProto.Segment.newBuilder()
  private Builder() {
  }

  private Builder(
      com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
    super(parent);
  }

  /** Resets both fields to their defaults and clears all presence bits. */
  @java.lang.Override
  public Builder clear() {
    super.clear();
    bitField0_ = 0;
    begin_ = 0L;
    end_ = 0L;
    return this;
  }

  @java.lang.Override
  public com.google.protobuf.Descriptors.Descriptor
      getDescriptorForType() {
    return onnx.Onnx.internal_static_onnx_TensorProto_Segment_descriptor;
  }

  @java.lang.Override
  public onnx.Onnx.TensorProto.Segment getDefaultInstanceForType() {
    return onnx.Onnx.TensorProto.Segment.getDefaultInstance();
  }

  @java.lang.Override
  public onnx.Onnx.TensorProto.Segment build() {
    onnx.Onnx.TensorProto.Segment result = buildPartial();
    if (!result.isInitialized()) {
      throw newUninitializedMessageException(result);
    }
    return result;
  }

  @java.lang.Override
  public onnx.Onnx.TensorProto.Segment buildPartial() {
    onnx.Onnx.TensorProto.Segment result = new onnx.Onnx.TensorProto.Segment(this);
    if (bitField0_ != 0) { buildPartial0(result); }
    onBuilt();
    return result;
  }

  // Copies only the fields whose presence bit is set in this builder into the
  // freshly built message, mirroring the bits into the message's bitField0_.
  private void buildPartial0(onnx.Onnx.TensorProto.Segment result) {
    int from_bitField0_ = bitField0_;
    int to_bitField0_ = 0;
    if (((from_bitField0_ & 0x00000001) != 0)) {
      result.begin_ = begin_;
      to_bitField0_ |= 0x00000001;
    }
    if (((from_bitField0_ & 0x00000002) != 0)) {
      result.end_ = end_;
      to_bitField0_ |= 0x00000002;
    }
    result.bitField0_ |= to_bitField0_;
  }

  @java.lang.Override
  public Builder clone() {
    return super.clone();
  }

  @java.lang.Override
  public Builder setField(
      com.google.protobuf.Descriptors.FieldDescriptor field,
      java.lang.Object value) {
    return super.setField(field, value);
  }

  @java.lang.Override
  public Builder clearField(
      com.google.protobuf.Descriptors.FieldDescriptor field) {
    return super.clearField(field);
  }

  @java.lang.Override
  public Builder clearOneof(
      com.google.protobuf.Descriptors.OneofDescriptor oneof) {
    return super.clearOneof(oneof);
  }

  @java.lang.Override
  public Builder setRepeatedField(
      com.google.protobuf.Descriptors.FieldDescriptor field,
      int index, java.lang.Object value) {
    return super.setRepeatedField(field, index, value);
  }

  @java.lang.Override
  public Builder addRepeatedField(
      com.google.protobuf.Descriptors.FieldDescriptor field,
      java.lang.Object value) {
    return super.addRepeatedField(field, value);
  }

  @java.lang.Override
  public Builder mergeFrom(com.google.protobuf.Message other) {
    if (other instanceof onnx.Onnx.TensorProto.Segment) {
      return mergeFrom((onnx.Onnx.TensorProto.Segment)other);
    } else {
      super.mergeFrom(other);
      return this;
    }
  }

  // Field-wise merge: only fields present in 'other' overwrite this builder.
  public Builder mergeFrom(onnx.Onnx.TensorProto.Segment other) {
    if (other == onnx.Onnx.TensorProto.Segment.getDefaultInstance()) return this;
    if (other.hasBegin()) {
      setBegin(other.getBegin());
    }
    if (other.hasEnd()) {
      setEnd(other.getEnd());
    }
    this.mergeUnknownFields(other.getUnknownFields());
    onChanged();
    return this;
  }

  @java.lang.Override
  public final boolean isInitialized() {
    // Segment has no required fields, so any state is initialized.
    return true;
  }

  // Wire-format parse loop: tag 8 = begin (int64), tag 16 = end (int64);
  // anything else is preserved as an unknown field.
  @java.lang.Override
  public Builder mergeFrom(
      com.google.protobuf.CodedInputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    if (extensionRegistry == null) {
      throw new java.lang.NullPointerException();
    }
    try {
      boolean done = false;
      while (!done) {
        int tag = input.readTag();
        switch (tag) {
          case 0:
            done = true;
            break;
          case 8: {
            begin_ = input.readInt64();
            bitField0_ |= 0x00000001;
            break;
          } // case 8
          case 16: {
            end_ = input.readInt64();
            bitField0_ |= 0x00000002;
            break;
          } // case 16
          default: {
            if (!super.parseUnknownField(input, extensionRegistry, tag)) {
              done = true; // was an endgroup tag
            }
            break;
          } // default:
        } // switch (tag)
      } // while (!done)
    } catch (com.google.protobuf.InvalidProtocolBufferException e) {
      throw e.unwrapIOException();
    } finally {
      onChanged();
    } // finally
    return this;
  }

  // Presence bits: 0x1 = begin, 0x2 = end.
  private int bitField0_;

  private long begin_ ;
  /**
   * <code>optional int64 begin = 1;</code>
   * @return Whether the begin field is set.
   */
  @java.lang.Override
  public boolean hasBegin() {
    return ((bitField0_ & 0x00000001) != 0);
  }
  /**
   * <code>optional int64 begin = 1;</code>
   * @return The begin.
   */
  @java.lang.Override
  public long getBegin() {
    return begin_;
  }
  /**
   * <code>optional int64 begin = 1;</code>
   * @param value The begin to set.
   * @return This builder for chaining.
   */
  public Builder setBegin(long value) {
    begin_ = value;
    bitField0_ |= 0x00000001;
    onChanged();
    return this;
  }
  /**
   * <code>optional int64 begin = 1;</code>
   * @return This builder for chaining.
   */
  public Builder clearBegin() {
    bitField0_ = (bitField0_ & ~0x00000001);
    begin_ = 0L;
    onChanged();
    return this;
  }

  private long end_ ;
  /**
   * <code>optional int64 end = 2;</code>
   * @return Whether the end field is set.
   */
  @java.lang.Override
  public boolean hasEnd() {
    return ((bitField0_ & 0x00000002) != 0);
  }
  /**
   * <code>optional int64 end = 2;</code>
   * @return The end.
   */
  @java.lang.Override
  public long getEnd() {
    return end_;
  }
  /**
   * <code>optional int64 end = 2;</code>
   * @param value The end to set.
   * @return This builder for chaining.
   */
  public Builder setEnd(long value) {
    end_ = value;
    bitField0_ |= 0x00000002;
    onChanged();
    return this;
  }
  /**
   * <code>optional int64 end = 2;</code>
   * @return This builder for chaining.
   */
  public Builder clearEnd() {
    bitField0_ = (bitField0_ & ~0x00000002);
    end_ = 0L;
    onChanged();
    return this;
  }

  @java.lang.Override
  public final Builder setUnknownFields(
      final com.google.protobuf.UnknownFieldSet unknownFields) {
    return super.setUnknownFields(unknownFields);
  }

  @java.lang.Override
  public final Builder mergeUnknownFields(
      final com.google.protobuf.UnknownFieldSet unknownFields) {
    return super.mergeUnknownFields(unknownFields);
  }

  // @@protoc_insertion_point(builder_scope:onnx.TensorProto.Segment)
}
// @@protoc_insertion_point(class_scope:onnx.TensorProto.Segment)
// Shared immutable default instance, created eagerly at class load.
private static final onnx.Onnx.TensorProto.Segment DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new onnx.Onnx.TensorProto.Segment();
}
public static onnx.Onnx.TensorProto.Segment getDefaultInstance() {
return DEFAULT_INSTANCE;
}
// Singleton wire-format parser for Segment. Deprecated for direct use in
// favor of parser(); parsing is implemented by replaying the stream through
// a fresh Builder so that partially-read data is preserved on failure.
@java.lang.Deprecated public static final com.google.protobuf.Parser<Segment>
    PARSER = new com.google.protobuf.AbstractParser<Segment>() {
  @java.lang.Override
  public Segment parsePartialFrom(
      com.google.protobuf.CodedInputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    Builder builder = newBuilder();
    try {
      builder.mergeFrom(input, extensionRegistry);
    } catch (com.google.protobuf.InvalidProtocolBufferException e) {
      // Attach whatever was parsed so far so callers can inspect it.
      throw e.setUnfinishedMessage(builder.buildPartial());
    } catch (com.google.protobuf.UninitializedMessageException e) {
      throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
    } catch (java.io.IOException e) {
      throw new com.google.protobuf.InvalidProtocolBufferException(e)
          .setUnfinishedMessage(builder.buildPartial());
    }
    return builder.buildPartial();
  }
};
/** Static accessor for the Segment parser (preferred over the PARSER field). */
public static com.google.protobuf.Parser<Segment> parser() {
  return PARSER;
}

@java.lang.Override
public com.google.protobuf.Parser<Segment> getParserForType() {
  return PARSER;
}
// Instance-level accessor for the shared default Segment.
@java.lang.Override
public onnx.Onnx.TensorProto.Segment getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
// Presence bits for TensorProto's optional fields.
private int bitField0_;

public static final int DIMS_FIELD_NUMBER = 1;
@SuppressWarnings("serial")
private com.google.protobuf.Internal.LongList dims_ =
    emptyLongList();
/**
 * <pre>
 * The shape of the tensor.
 * </pre>
 *
 * <code>repeated int64 dims = 1;</code>
 * @return A list containing the dims.
 */
@java.lang.Override
public java.util.List<java.lang.Long>
    getDimsList() {
  return dims_;
}
/**
 * <pre>
 * The shape of the tensor.
 * </pre>
 *
 * <code>repeated int64 dims = 1;</code>
 * @return The count of dims.
 */
public int getDimsCount() {
  return dims_.size();
}
/**
 * <pre>
 * The shape of the tensor.
 * </pre>
 *
 * <code>repeated int64 dims = 1;</code>
 * @param index The index of the element to return.
 * @return The dims at the given index.
 */
public long getDims(int index) {
  return dims_.getLong(index);
}
public static final int DATA_TYPE_FIELD_NUMBER = 2;
// Stored as a raw int rather than the DataType enum (proto declares int32).
private int dataType_ = 0;
/**
 *
 * The data type of the tensor.
 * This field MUST have a valid TensorProto.DataType value
 *
 *
 * optional int32 data_type = 2;
 * @return Whether the dataType field is set.
 */
@java.lang.Override
public boolean hasDataType() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
 *
 * The data type of the tensor.
 * This field MUST have a valid TensorProto.DataType value
 *
 *
 * optional int32 data_type = 2;
 * @return The dataType.
 */
@java.lang.Override
public int getDataType() {
return dataType_;
}
public static final int SEGMENT_FIELD_NUMBER = 3;
// Lazily null until set; accessors substitute the default instance.
private onnx.Onnx.TensorProto.Segment segment_;
/**
 * optional .onnx.TensorProto.Segment segment = 3;
 * @return Whether the segment field is set.
 */
@java.lang.Override
public boolean hasSegment() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
 * optional .onnx.TensorProto.Segment segment = 3;
 * @return The segment.
 */
@java.lang.Override
public onnx.Onnx.TensorProto.Segment getSegment() {
return segment_ == null ? onnx.Onnx.TensorProto.Segment.getDefaultInstance() : segment_;
}
/**
 * optional .onnx.TensorProto.Segment segment = 3;
 */
@java.lang.Override
public onnx.Onnx.TensorProto.SegmentOrBuilder getSegmentOrBuilder() {
return segment_ == null ? onnx.Onnx.TensorProto.Segment.getDefaultInstance() : segment_;
}
public static final int FLOAT_DATA_FIELD_NUMBER = 4;
@SuppressWarnings("serial")
private com.google.protobuf.Internal.FloatList floatData_ =
    emptyFloatList();
/**
 * <pre>
 * For float and complex64 values
 * Complex64 tensors are encoded as a single array of floats,
 * with the real components appearing in odd numbered positions,
 * and the corresponding imaginary component appearing in the
 * subsequent even numbered position. (e.g., [1.0 + 2.0i, 3.0 + 4.0i]
 * is encoded as [1.0, 2.0 ,3.0 ,4.0]
 * When this field is present, the data_type field MUST be FLOAT or COMPLEX64.
 * </pre>
 *
 * <code>repeated float float_data = 4 [packed = true];</code>
 * @return A list containing the floatData.
 */
@java.lang.Override
public java.util.List<java.lang.Float>
    getFloatDataList() {
  return floatData_;
}
/**
 * <code>repeated float float_data = 4 [packed = true];</code>
 * @return The count of floatData.
 */
public int getFloatDataCount() {
  return floatData_.size();
}
/**
 * <code>repeated float float_data = 4 [packed = true];</code>
 * @param index The index of the element to return.
 * @return The floatData at the given index.
 */
public float getFloatData(int index) {
  return floatData_.getFloat(index);
}
// Cached byte size of the packed payload, computed by getSerializedSize()
// and reused by writeTo() for the length prefix.
private int floatDataMemoizedSerializedSize = -1;
public static final int INT32_DATA_FIELD_NUMBER = 5;
@SuppressWarnings("serial")
private com.google.protobuf.Internal.IntList int32Data_ =
    emptyIntList();
/**
 * <pre>
 * For int32, uint8, int8, uint16, int16, bool, float8, and float16 values
 * float16 and float8 values must be bit-wise converted to an uint16_t prior
 * to writing to the buffer.
 * When this field is present, the data_type field MUST be
 * INT32, INT16, INT8, UINT16, UINT8, BOOL, FLOAT16, BFLOAT16, FLOAT8E4M3FN, FLOAT8E4M3FNUZ, FLOAT8E5M2, FLOAT8E5M2FNUZ
 * </pre>
 *
 * <code>repeated int32 int32_data = 5 [packed = true];</code>
 * @return A list containing the int32Data.
 */
@java.lang.Override
public java.util.List<java.lang.Integer>
    getInt32DataList() {
  return int32Data_;
}
/**
 * <code>repeated int32 int32_data = 5 [packed = true];</code>
 * @return The count of int32Data.
 */
public int getInt32DataCount() {
  return int32Data_.size();
}
/**
 * <code>repeated int32 int32_data = 5 [packed = true];</code>
 * @param index The index of the element to return.
 * @return The int32Data at the given index.
 */
public int getInt32Data(int index) {
  return int32Data_.getInt(index);
}
// Cached packed-payload byte size for the writeTo() length prefix.
private int int32DataMemoizedSerializedSize = -1;
public static final int STRING_DATA_FIELD_NUMBER = 6;
@SuppressWarnings("serial")
private com.google.protobuf.Internal.ProtobufList<com.google.protobuf.ByteString> stringData_ =
    emptyList(com.google.protobuf.ByteString.class);
/**
 * <pre>
 * For strings.
 * Each element of string_data is a UTF-8 encoded Unicode
 * string. No trailing null, no leading BOM. The protobuf "string"
 * scalar type is not used to match ML community conventions.
 * When this field is present, the data_type field MUST be STRING
 * </pre>
 *
 * <code>repeated bytes string_data = 6;</code>
 * @return A list containing the stringData.
 */
@java.lang.Override
public java.util.List<com.google.protobuf.ByteString>
    getStringDataList() {
  return stringData_;
}
/**
 * <code>repeated bytes string_data = 6;</code>
 * @return The count of stringData.
 */
public int getStringDataCount() {
  return stringData_.size();
}
/**
 * <code>repeated bytes string_data = 6;</code>
 * @param index The index of the element to return.
 * @return The stringData at the given index.
 */
public com.google.protobuf.ByteString getStringData(int index) {
  return stringData_.get(index);
}
public static final int INT64_DATA_FIELD_NUMBER = 7;
@SuppressWarnings("serial")
private com.google.protobuf.Internal.LongList int64Data_ =
    emptyLongList();
/**
 * <pre>
 * For int64.
 * When this field is present, the data_type field MUST be INT64
 * </pre>
 *
 * <code>repeated int64 int64_data = 7 [packed = true];</code>
 * @return A list containing the int64Data.
 */
@java.lang.Override
public java.util.List<java.lang.Long>
    getInt64DataList() {
  return int64Data_;
}
/**
 * <code>repeated int64 int64_data = 7 [packed = true];</code>
 * @return The count of int64Data.
 */
public int getInt64DataCount() {
  return int64Data_.size();
}
/**
 * <code>repeated int64 int64_data = 7 [packed = true];</code>
 * @param index The index of the element to return.
 * @return The int64Data at the given index.
 */
public long getInt64Data(int index) {
  return int64Data_.getLong(index);
}
// Cached packed-payload byte size for the writeTo() length prefix.
private int int64DataMemoizedSerializedSize = -1;
public static final int NAME_FIELD_NUMBER = 8;
// Holds either a String or a ByteString; converted lazily in either
// direction and cached (hence volatile).
@SuppressWarnings("serial")
private volatile java.lang.Object name_ = "";
/**
 *
 * Optionally, a name for the tensor.
 *
 *
 * optional string name = 8;
 * @return Whether the name field is set.
 */
@java.lang.Override
public boolean hasName() {
return ((bitField0_ & 0x00000004) != 0);
}
/**
 *
 * Optionally, a name for the tensor.
 *
 *
 * optional string name = 8;
 * @return The name.
 */
@java.lang.Override
public java.lang.String getName() {
java.lang.Object ref = name_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
// Cache the decoded form only if the bytes were valid UTF-8.
if (bs.isValidUtf8()) {
name_ = s;
}
return s;
}
}
/**
 *
 * Optionally, a name for the tensor.
 *
 *
 * optional string name = 8;
 * @return The bytes for name.
 */
@java.lang.Override
public com.google.protobuf.ByteString
getNameBytes() {
java.lang.Object ref = name_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
// Cache the encoded form for subsequent calls.
name_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
public static final int DOC_STRING_FIELD_NUMBER = 12;
// Holds either a String or a ByteString; converted lazily and cached.
@SuppressWarnings("serial")
private volatile java.lang.Object docString_ = "";
/**
 * <pre>
 * A human-readable documentation for this tensor. Markdown is allowed.
 * </pre>
 *
 * <code>optional string doc_string = 12;</code>
 * @return Whether the docString field is set.
 */
@java.lang.Override
public boolean hasDocString() {
  return ((bitField0_ & 0x00000008) != 0);
}
/**
 * <pre>
 * A human-readable documentation for this tensor. Markdown is allowed.
 * </pre>
 *
 * <code>optional string doc_string = 12;</code>
 * @return The docString.
 */
@java.lang.Override
public java.lang.String getDocString() {
  java.lang.Object ref = docString_;
  if (ref instanceof java.lang.String) {
    return (java.lang.String) ref;
  } else {
    com.google.protobuf.ByteString bs =
        (com.google.protobuf.ByteString) ref;
    java.lang.String s = bs.toStringUtf8();
    // Cache the decoded form only if the bytes were valid UTF-8.
    if (bs.isValidUtf8()) {
      docString_ = s;
    }
    return s;
  }
}
/**
 * <pre>
 * A human-readable documentation for this tensor. Markdown is allowed.
 * </pre>
 *
 * <code>optional string doc_string = 12;</code>
 * @return The bytes for docString.
 */
@java.lang.Override
public com.google.protobuf.ByteString
    getDocStringBytes() {
  java.lang.Object ref = docString_;
  if (ref instanceof java.lang.String) {
    com.google.protobuf.ByteString b =
        com.google.protobuf.ByteString.copyFromUtf8(
            (java.lang.String) ref);
    docString_ = b;
    return b;
  } else {
    return (com.google.protobuf.ByteString) ref;
  }
}
public static final int RAW_DATA_FIELD_NUMBER = 9;
// Defaults to the shared EMPTY ByteString; never null.
private com.google.protobuf.ByteString rawData_ = com.google.protobuf.ByteString.EMPTY;
/**
 *
 * Serializations can either use one of the fields above, or use this
 * raw bytes field. The only exception is the string case, where one is
 * required to store the content in the repeated bytes string_data field.
 *
 * When this raw_data field is used to store tensor value, elements MUST
 * be stored in as fixed-width, little-endian order.
 * Floating-point data types MUST be stored in IEEE 754 format.
 * Complex64 elements must be written as two consecutive FLOAT values, real component first.
 * Complex128 elements must be written as two consecutive DOUBLE values, real component first.
 * Boolean type MUST be written one byte per tensor element (00000001 for true, 00000000 for false).
 *
 * Note: the advantage of specific field rather than the raw_data field is
 * that in some cases (e.g. int data), protobuf does a better packing via
 * variable length storage, and may lead to smaller binary footprint.
 * When this field is present, the data_type field MUST NOT be STRING or UNDEFINED
 *
 *
 * optional bytes raw_data = 9;
 * @return Whether the rawData field is set.
 */
@java.lang.Override
public boolean hasRawData() {
return ((bitField0_ & 0x00000010) != 0);
}
/**
 *
 * Serializations can either use one of the fields above, or use this
 * raw bytes field. The only exception is the string case, where one is
 * required to store the content in the repeated bytes string_data field.
 *
 * When this raw_data field is used to store tensor value, elements MUST
 * be stored in as fixed-width, little-endian order.
 * Floating-point data types MUST be stored in IEEE 754 format.
 * Complex64 elements must be written as two consecutive FLOAT values, real component first.
 * Complex128 elements must be written as two consecutive DOUBLE values, real component first.
 * Boolean type MUST be written one byte per tensor element (00000001 for true, 00000000 for false).
 *
 * Note: the advantage of specific field rather than the raw_data field is
 * that in some cases (e.g. int data), protobuf does a better packing via
 * variable length storage, and may lead to smaller binary footprint.
 * When this field is present, the data_type field MUST NOT be STRING or UNDEFINED
 *
 *
 * optional bytes raw_data = 9;
 * @return The rawData.
 */
@java.lang.Override
public com.google.protobuf.ByteString getRawData() {
return rawData_;
}
public static final int EXTERNAL_DATA_FIELD_NUMBER = 13;
@SuppressWarnings("serial")
private java.util.List<onnx.Onnx.StringStringEntryProto> externalData_;
/**
 * <pre>
 * Data can be stored inside the protobuf file using type-specific fields or raw_data.
 * Alternatively, raw bytes data can be stored in an external file, using the external_data field.
 * external_data stores key-value pairs describing data location. Recognized keys are:
 * - "location" (required) - POSIX filesystem path relative to the directory where the ONNX
 *                           protobuf model was stored
 * - "offset" (optional) - position of byte at which stored data begins. Integer stored as string.
 *                         Offset values SHOULD be multiples 4096 (page size) to enable mmap support.
 * - "length" (optional) - number of bytes containing data. Integer stored as string.
 * - "checksum" (optional) - SHA1 digest of file specified in under 'location' key.
 * </pre>
 *
 * <code>repeated .onnx.StringStringEntryProto external_data = 13;</code>
 */
@java.lang.Override
public java.util.List<onnx.Onnx.StringStringEntryProto> getExternalDataList() {
  return externalData_;
}
/**
 * <code>repeated .onnx.StringStringEntryProto external_data = 13;</code>
 */
@java.lang.Override
public java.util.List<? extends onnx.Onnx.StringStringEntryProtoOrBuilder>
    getExternalDataOrBuilderList() {
  return externalData_;
}
/**
 * <code>repeated .onnx.StringStringEntryProto external_data = 13;</code>
 */
@java.lang.Override
public int getExternalDataCount() {
  return externalData_.size();
}
/**
 * <code>repeated .onnx.StringStringEntryProto external_data = 13;</code>
 */
@java.lang.Override
public onnx.Onnx.StringStringEntryProto getExternalData(int index) {
  return externalData_.get(index);
}
/**
 * <code>repeated .onnx.StringStringEntryProto external_data = 13;</code>
 */
@java.lang.Override
public onnx.Onnx.StringStringEntryProtoOrBuilder getExternalDataOrBuilder(
    int index) {
  return externalData_.get(index);
}
public static final int DATA_LOCATION_FIELD_NUMBER = 14;
// Stored as the enum's wire number; mapped to DataLocation on access.
private int dataLocation_ = 0;
/**
 *
 * If value not set, data is stored in raw_data (if set) otherwise in type-specified field.
 *
 *
 * optional .onnx.TensorProto.DataLocation data_location = 14;
 * @return Whether the dataLocation field is set.
 */
@java.lang.Override public boolean hasDataLocation() {
return ((bitField0_ & 0x00000020) != 0);
}
/**
 *
 * If value not set, data is stored in raw_data (if set) otherwise in type-specified field.
 *
 *
 * optional .onnx.TensorProto.DataLocation data_location = 14;
 * @return The dataLocation.
 */
@java.lang.Override public onnx.Onnx.TensorProto.DataLocation getDataLocation() {
onnx.Onnx.TensorProto.DataLocation result = onnx.Onnx.TensorProto.DataLocation.forNumber(dataLocation_);
// Unrecognized wire values fall back to DEFAULT rather than null.
return result == null ? onnx.Onnx.TensorProto.DataLocation.DEFAULT : result;
}
public static final int DOUBLE_DATA_FIELD_NUMBER = 10;
@SuppressWarnings("serial")
private com.google.protobuf.Internal.DoubleList doubleData_ =
    emptyDoubleList();
/**
 * <pre>
 * For double
 * Complex128 tensors are encoded as a single array of doubles,
 * with the real components appearing in odd numbered positions,
 * and the corresponding imaginary component appearing in the
 * subsequent even numbered position. (e.g., [1.0 + 2.0i, 3.0 + 4.0i]
 * is encoded as [1.0, 2.0 ,3.0 ,4.0]
 * When this field is present, the data_type field MUST be DOUBLE or COMPLEX128
 * </pre>
 *
 * <code>repeated double double_data = 10 [packed = true];</code>
 * @return A list containing the doubleData.
 */
@java.lang.Override
public java.util.List<java.lang.Double>
    getDoubleDataList() {
  return doubleData_;
}
/**
 * <code>repeated double double_data = 10 [packed = true];</code>
 * @return The count of doubleData.
 */
public int getDoubleDataCount() {
  return doubleData_.size();
}
/**
 * <code>repeated double double_data = 10 [packed = true];</code>
 * @param index The index of the element to return.
 * @return The doubleData at the given index.
 */
public double getDoubleData(int index) {
  return doubleData_.getDouble(index);
}
// Cached packed-payload byte size for the writeTo() length prefix.
private int doubleDataMemoizedSerializedSize = -1;
public static final int UINT64_DATA_FIELD_NUMBER = 11;
@SuppressWarnings("serial")
private com.google.protobuf.Internal.LongList uint64Data_ =
    emptyLongList();
/**
 * <pre>
 * For uint64 and uint32 values
 * When this field is present, the data_type field MUST be
 * UINT32 or UINT64
 * </pre>
 *
 * <code>repeated uint64 uint64_data = 11 [packed = true];</code>
 * @return A list containing the uint64Data.
 */
@java.lang.Override
public java.util.List<java.lang.Long>
    getUint64DataList() {
  return uint64Data_;
}
/**
 * <code>repeated uint64 uint64_data = 11 [packed = true];</code>
 * @return The count of uint64Data.
 */
public int getUint64DataCount() {
  return uint64Data_.size();
}
/**
 * <code>repeated uint64 uint64_data = 11 [packed = true];</code>
 * @param index The index of the element to return.
 * @return The uint64Data at the given index.
 */
public long getUint64Data(int index) {
  return uint64Data_.getLong(index);
}
// Cached packed-payload byte size for the writeTo() length prefix.
private int uint64DataMemoizedSerializedSize = -1;
// -1 = not yet computed, 1 = initialized, 0 = not initialized.
private byte memoizedIsInitialized = -1;
// TensorProto has no required fields, so every instance is initialized.
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
memoizedIsInitialized = 1;
return true;
}
// Serializes this TensorProto to the wire. Field order and tag bytes are
// dictated by the protobuf wire format; do not reorder. Packed repeated
// fields (float/int32/int64/double/uint64) are emitted as a raw tag byte
// (e.g. 34 = field 4, wire type 2) followed by a memoized payload length,
// which is why getSerializedSize() must run first to populate those caches.
@java.lang.Override
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
for (int i = 0; i < dims_.size(); i++) {
output.writeInt64(1, dims_.getLong(i));
}
if (((bitField0_ & 0x00000001) != 0)) {
output.writeInt32(2, dataType_);
}
if (((bitField0_ & 0x00000002) != 0)) {
output.writeMessage(3, getSegment());
}
if (getFloatDataList().size() > 0) {
output.writeUInt32NoTag(34);
output.writeUInt32NoTag(floatDataMemoizedSerializedSize);
}
for (int i = 0; i < floatData_.size(); i++) {
output.writeFloatNoTag(floatData_.getFloat(i));
}
if (getInt32DataList().size() > 0) {
output.writeUInt32NoTag(42);
output.writeUInt32NoTag(int32DataMemoizedSerializedSize);
}
for (int i = 0; i < int32Data_.size(); i++) {
output.writeInt32NoTag(int32Data_.getInt(i));
}
// string_data (field 6) is not packable (length-delimited elements).
for (int i = 0; i < stringData_.size(); i++) {
output.writeBytes(6, stringData_.get(i));
}
if (getInt64DataList().size() > 0) {
output.writeUInt32NoTag(58);
output.writeUInt32NoTag(int64DataMemoizedSerializedSize);
}
for (int i = 0; i < int64Data_.size(); i++) {
output.writeInt64NoTag(int64Data_.getLong(i));
}
if (((bitField0_ & 0x00000004) != 0)) {
com.google.protobuf.GeneratedMessageV3.writeString(output, 8, name_);
}
if (((bitField0_ & 0x00000010) != 0)) {
output.writeBytes(9, rawData_);
}
if (getDoubleDataList().size() > 0) {
output.writeUInt32NoTag(82);
output.writeUInt32NoTag(doubleDataMemoizedSerializedSize);
}
for (int i = 0; i < doubleData_.size(); i++) {
output.writeDoubleNoTag(doubleData_.getDouble(i));
}
if (getUint64DataList().size() > 0) {
output.writeUInt32NoTag(90);
output.writeUInt32NoTag(uint64DataMemoizedSerializedSize);
}
for (int i = 0; i < uint64Data_.size(); i++) {
output.writeUInt64NoTag(uint64Data_.getLong(i));
}
if (((bitField0_ & 0x00000008) != 0)) {
com.google.protobuf.GeneratedMessageV3.writeString(output, 12, docString_);
}
for (int i = 0; i < externalData_.size(); i++) {
output.writeMessage(13, externalData_.get(i));
}
if (((bitField0_ & 0x00000020) != 0)) {
output.writeEnum(14, dataLocation_);
}
// Preserve fields this runtime did not recognize when parsing.
getUnknownFields().writeTo(output);
}
// Computes (and memoizes) the exact encoded byte size of this message.
// Side effect: stores the payload size of each packed repeated field in its
// *MemoizedSerializedSize cache, which writeTo() later emits as the length
// prefix — so this must be kept in lock-step with writeTo().
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
{
int dataSize = 0;
for (int i = 0; i < dims_.size(); i++) {
dataSize += com.google.protobuf.CodedOutputStream
.computeInt64SizeNoTag(dims_.getLong(i));
}
size += dataSize;
// dims is unpacked here: one 1-byte tag per element.
size += 1 * getDimsList().size();
}
if (((bitField0_ & 0x00000001) != 0)) {
size += com.google.protobuf.CodedOutputStream
.computeInt32Size(2, dataType_);
}
if (((bitField0_ & 0x00000002) != 0)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(3, getSegment());
}
{
// Packed floats are fixed 4 bytes each.
int dataSize = 0;
dataSize = 4 * getFloatDataList().size();
size += dataSize;
if (!getFloatDataList().isEmpty()) {
size += 1;
size += com.google.protobuf.CodedOutputStream
.computeInt32SizeNoTag(dataSize);
}
floatDataMemoizedSerializedSize = dataSize;
}
{
int dataSize = 0;
for (int i = 0; i < int32Data_.size(); i++) {
dataSize += com.google.protobuf.CodedOutputStream
.computeInt32SizeNoTag(int32Data_.getInt(i));
}
size += dataSize;
if (!getInt32DataList().isEmpty()) {
size += 1;
size += com.google.protobuf.CodedOutputStream
.computeInt32SizeNoTag(dataSize);
}
int32DataMemoizedSerializedSize = dataSize;
}
{
int dataSize = 0;
for (int i = 0; i < stringData_.size(); i++) {
dataSize += com.google.protobuf.CodedOutputStream
.computeBytesSizeNoTag(stringData_.get(i));
}
size += dataSize;
size += 1 * getStringDataList().size();
}
{
int dataSize = 0;
for (int i = 0; i < int64Data_.size(); i++) {
dataSize += com.google.protobuf.CodedOutputStream
.computeInt64SizeNoTag(int64Data_.getLong(i));
}
size += dataSize;
if (!getInt64DataList().isEmpty()) {
size += 1;
size += com.google.protobuf.CodedOutputStream
.computeInt32SizeNoTag(dataSize);
}
int64DataMemoizedSerializedSize = dataSize;
}
if (((bitField0_ & 0x00000004) != 0)) {
size += com.google.protobuf.GeneratedMessageV3.computeStringSize(8, name_);
}
if (((bitField0_ & 0x00000010) != 0)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(9, rawData_);
}
{
// Packed doubles are fixed 8 bytes each.
int dataSize = 0;
dataSize = 8 * getDoubleDataList().size();
size += dataSize;
if (!getDoubleDataList().isEmpty()) {
size += 1;
size += com.google.protobuf.CodedOutputStream
.computeInt32SizeNoTag(dataSize);
}
doubleDataMemoizedSerializedSize = dataSize;
}
{
int dataSize = 0;
for (int i = 0; i < uint64Data_.size(); i++) {
dataSize += com.google.protobuf.CodedOutputStream
.computeUInt64SizeNoTag(uint64Data_.getLong(i));
}
size += dataSize;
if (!getUint64DataList().isEmpty()) {
size += 1;
size += com.google.protobuf.CodedOutputStream
.computeInt32SizeNoTag(dataSize);
}
uint64DataMemoizedSerializedSize = dataSize;
}
if (((bitField0_ & 0x00000008) != 0)) {
size += com.google.protobuf.GeneratedMessageV3.computeStringSize(12, docString_);
}
for (int i = 0; i < externalData_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(13, externalData_.get(i));
}
if (((bitField0_ & 0x00000020) != 0)) {
size += com.google.protobuf.CodedOutputStream
.computeEnumSize(14, dataLocation_);
}
size += getUnknownFields().getSerializedSize();
memoizedSize = size;
return size;
}
// Structural equality: every field (presence and value), plus unknown
// fields, must match. Optional fields compare presence before value.
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof onnx.Onnx.TensorProto)) {
return super.equals(obj);
}
onnx.Onnx.TensorProto other = (onnx.Onnx.TensorProto) obj;
if (!getDimsList()
.equals(other.getDimsList())) return false;
if (hasDataType() != other.hasDataType()) return false;
if (hasDataType()) {
if (getDataType()
!= other.getDataType()) return false;
}
if (hasSegment() != other.hasSegment()) return false;
if (hasSegment()) {
if (!getSegment()
.equals(other.getSegment())) return false;
}
if (!getFloatDataList()
.equals(other.getFloatDataList())) return false;
if (!getInt32DataList()
.equals(other.getInt32DataList())) return false;
if (!getStringDataList()
.equals(other.getStringDataList())) return false;
if (!getInt64DataList()
.equals(other.getInt64DataList())) return false;
if (hasName() != other.hasName()) return false;
if (hasName()) {
if (!getName()
.equals(other.getName())) return false;
}
if (hasDocString() != other.hasDocString()) return false;
if (hasDocString()) {
if (!getDocString()
.equals(other.getDocString())) return false;
}
if (hasRawData() != other.hasRawData()) return false;
if (hasRawData()) {
if (!getRawData()
.equals(other.getRawData())) return false;
}
if (!getExternalDataList()
.equals(other.getExternalDataList())) return false;
if (hasDataLocation() != other.hasDataLocation()) return false;
if (hasDataLocation()) {
// Enum is stored as its raw int number; compare numbers directly.
if (dataLocation_ != other.dataLocation_) return false;
}
if (!getDoubleDataList()
.equals(other.getDoubleDataList())) return false;
if (!getUint64DataList()
.equals(other.getUint64DataList())) return false;
if (!getUnknownFields().equals(other.getUnknownFields())) return false;
return true;
}
// Hash over all set/non-empty fields, folding in each field number so that
// equal values in different fields hash differently. Consistent with
// equals() above; result is memoized (0 means "not computed yet").
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (getDimsCount() > 0) {
hash = (37 * hash) + DIMS_FIELD_NUMBER;
hash = (53 * hash) + getDimsList().hashCode();
}
if (hasDataType()) {
hash = (37 * hash) + DATA_TYPE_FIELD_NUMBER;
hash = (53 * hash) + getDataType();
}
if (hasSegment()) {
hash = (37 * hash) + SEGMENT_FIELD_NUMBER;
hash = (53 * hash) + getSegment().hashCode();
}
if (getFloatDataCount() > 0) {
hash = (37 * hash) + FLOAT_DATA_FIELD_NUMBER;
hash = (53 * hash) + getFloatDataList().hashCode();
}
if (getInt32DataCount() > 0) {
hash = (37 * hash) + INT32_DATA_FIELD_NUMBER;
hash = (53 * hash) + getInt32DataList().hashCode();
}
if (getStringDataCount() > 0) {
hash = (37 * hash) + STRING_DATA_FIELD_NUMBER;
hash = (53 * hash) + getStringDataList().hashCode();
}
if (getInt64DataCount() > 0) {
hash = (37 * hash) + INT64_DATA_FIELD_NUMBER;
hash = (53 * hash) + getInt64DataList().hashCode();
}
if (hasName()) {
hash = (37 * hash) + NAME_FIELD_NUMBER;
hash = (53 * hash) + getName().hashCode();
}
if (hasDocString()) {
hash = (37 * hash) + DOC_STRING_FIELD_NUMBER;
hash = (53 * hash) + getDocString().hashCode();
}
if (hasRawData()) {
hash = (37 * hash) + RAW_DATA_FIELD_NUMBER;
hash = (53 * hash) + getRawData().hashCode();
}
if (getExternalDataCount() > 0) {
hash = (37 * hash) + EXTERNAL_DATA_FIELD_NUMBER;
hash = (53 * hash) + getExternalDataList().hashCode();
}
if (hasDataLocation()) {
hash = (37 * hash) + DATA_LOCATION_FIELD_NUMBER;
hash = (53 * hash) + dataLocation_;
}
if (getDoubleDataCount() > 0) {
hash = (37 * hash) + DOUBLE_DATA_FIELD_NUMBER;
hash = (53 * hash) + getDoubleDataList().hashCode();
}
if (getUint64DataCount() > 0) {
hash = (37 * hash) + UINT64_DATA_FIELD_NUMBER;
hash = (53 * hash) + getUint64DataList().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
// Static parse entry points: one overload per input source (ByteBuffer,
// ByteString, byte[], InputStream, CodedInputStream), each with and without
// an ExtensionRegistry. All delegate to the shared PARSER instance;
// parseDelimitedFrom additionally expects a leading varint length prefix.
public static onnx.Onnx.TensorProto parseFrom(
java.nio.ByteBuffer data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static onnx.Onnx.TensorProto parseFrom(
java.nio.ByteBuffer data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static onnx.Onnx.TensorProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static onnx.Onnx.TensorProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static onnx.Onnx.TensorProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static onnx.Onnx.TensorProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static onnx.Onnx.TensorProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static onnx.Onnx.TensorProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
public static onnx.Onnx.TensorProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static onnx.Onnx.TensorProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static onnx.Onnx.TensorProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static onnx.Onnx.TensorProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
// Builder factories. newBuilder(prototype) starts from a copy of the given
// message; toBuilder() avoids a redundant merge when this is the default
// instance.
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(onnx.Onnx.TensorProto prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
*
* Tensors
*
* A serialized tensor value.
*
*
* Protobuf type {@code onnx.TensorProto}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessageV3.Builder implements
// @@protoc_insertion_point(builder_implements:onnx.TensorProto)
onnx.Onnx.TensorProtoOrBuilder {
// Descriptor plumbing and construction for the TensorProto builder.
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return onnx.Onnx.internal_static_onnx_TensorProto_descriptor;
}
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return onnx.Onnx.internal_static_onnx_TensorProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
onnx.Onnx.TensorProto.class, onnx.Onnx.TensorProto.Builder.class);
}
// Construct using onnx.Onnx.TensorProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
// Eagerly creates nested-message field builders when the runtime requests
// it (alwaysUseFieldBuilders is a protobuf-internal debugging switch).
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
getSegmentFieldBuilder();
getExternalDataFieldBuilder();
}
}
// Resets every field to its default and clears all presence bits in
// bitField0_. Note externalData_ has two storage modes: a plain list when
// no field builder exists, or the repeated-field builder otherwise.
@java.lang.Override
public Builder clear() {
super.clear();
bitField0_ = 0;
dims_ = emptyLongList();
dataType_ = 0;
segment_ = null;
if (segmentBuilder_ != null) {
segmentBuilder_.dispose();
segmentBuilder_ = null;
}
floatData_ = emptyFloatList();
int32Data_ = emptyIntList();
stringData_ = emptyList(com.google.protobuf.ByteString.class);
int64Data_ = emptyLongList();
name_ = "";
docString_ = "";
rawData_ = com.google.protobuf.ByteString.EMPTY;
if (externalDataBuilder_ == null) {
externalData_ = java.util.Collections.emptyList();
} else {
externalData_ = null;
externalDataBuilder_.clear();
}
// 0x00000400 is the external_data "is mutable list" bit.
bitField0_ = (bitField0_ & ~0x00000400);
dataLocation_ = 0;
doubleData_ = emptyDoubleList();
uint64Data_ = emptyLongList();
return this;
}
@java.lang.Override
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return onnx.Onnx.internal_static_onnx_TensorProto_descriptor;
}
@java.lang.Override
public onnx.Onnx.TensorProto getDefaultInstanceForType() {
return onnx.Onnx.TensorProto.getDefaultInstance();
}
// build() enforces the initialization contract; buildPartial() does not.
@java.lang.Override
public onnx.Onnx.TensorProto build() {
onnx.Onnx.TensorProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public onnx.Onnx.TensorProto buildPartial() {
onnx.Onnx.TensorProto result = new onnx.Onnx.TensorProto(this);
buildPartialRepeatedFields(result);
if (bitField0_ != 0) { buildPartial0(result); }
onBuilt();
return result;
}
// Transfers external_data into the message: either freezes the builder's
// plain list (clearing its mutability bit) or builds from the field builder.
private void buildPartialRepeatedFields(onnx.Onnx.TensorProto result) {
if (externalDataBuilder_ == null) {
if (((bitField0_ & 0x00000400) != 0)) {
externalData_ = java.util.Collections.unmodifiableList(externalData_);
bitField0_ = (bitField0_ & ~0x00000400);
}
result.externalData_ = externalData_;
} else {
result.externalData_ = externalDataBuilder_.build();
}
}
// Copies scalar and primitive-list fields into the message, translating the
// builder's bitField0_ presence bits into the message's (different) bit
// layout accumulated in to_bitField0_.
private void buildPartial0(onnx.Onnx.TensorProto result) {
int from_bitField0_ = bitField0_;
if (((from_bitField0_ & 0x00000001) != 0)) {
dims_.makeImmutable();
result.dims_ = dims_;
}
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000002) != 0)) {
result.dataType_ = dataType_;
to_bitField0_ |= 0x00000001;
}
if (((from_bitField0_ & 0x00000004) != 0)) {
result.segment_ = segmentBuilder_ == null
? segment_
: segmentBuilder_.build();
to_bitField0_ |= 0x00000002;
}
if (((from_bitField0_ & 0x00000008) != 0)) {
floatData_.makeImmutable();
result.floatData_ = floatData_;
}
if (((from_bitField0_ & 0x00000010) != 0)) {
int32Data_.makeImmutable();
result.int32Data_ = int32Data_;
}
if (((from_bitField0_ & 0x00000020) != 0)) {
stringData_.makeImmutable();
result.stringData_ = stringData_;
}
if (((from_bitField0_ & 0x00000040) != 0)) {
int64Data_.makeImmutable();
result.int64Data_ = int64Data_;
}
if (((from_bitField0_ & 0x00000080) != 0)) {
result.name_ = name_;
to_bitField0_ |= 0x00000004;
}
if (((from_bitField0_ & 0x00000100) != 0)) {
result.docString_ = docString_;
to_bitField0_ |= 0x00000008;
}
if (((from_bitField0_ & 0x00000200) != 0)) {
result.rawData_ = rawData_;
to_bitField0_ |= 0x00000010;
}
if (((from_bitField0_ & 0x00000800) != 0)) {
result.dataLocation_ = dataLocation_;
to_bitField0_ |= 0x00000020;
}
if (((from_bitField0_ & 0x00001000) != 0)) {
doubleData_.makeImmutable();
result.doubleData_ = doubleData_;
}
if (((from_bitField0_ & 0x00002000) != 0)) {
uint64Data_.makeImmutable();
result.uint64Data_ = uint64Data_;
}
result.bitField0_ |= to_bitField0_;
}
// Reflective builder API: these overrides simply delegate to the generated
// superclass implementations (kept explicit by protoc for binary
// compatibility across protobuf runtime versions).
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
com.google.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
com.google.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
com.google.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
com.google.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
com.google.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return super.addRepeatedField(field, value);
}
// Dispatches to the type-safe overload when possible, else falls back to
// the reflective field-by-field merge.
@java.lang.Override
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof onnx.Onnx.TensorProto) {
return mergeFrom((onnx.Onnx.TensorProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
// Merges another TensorProto into this builder: repeated fields are
// concatenated, optional fields are overwritten when set in `other`.
// Empty repeated fields share `other`'s immutable backing list instead of
// copying; the list is only copied if this builder later mutates it.
public Builder mergeFrom(onnx.Onnx.TensorProto other) {
if (other == onnx.Onnx.TensorProto.getDefaultInstance()) return this;
if (!other.dims_.isEmpty()) {
if (dims_.isEmpty()) {
dims_ = other.dims_;
dims_.makeImmutable();
bitField0_ |= 0x00000001;
} else {
ensureDimsIsMutable();
dims_.addAll(other.dims_);
}
onChanged();
}
if (other.hasDataType()) {
setDataType(other.getDataType());
}
if (other.hasSegment()) {
mergeSegment(other.getSegment());
}
if (!other.floatData_.isEmpty()) {
if (floatData_.isEmpty()) {
floatData_ = other.floatData_;
floatData_.makeImmutable();
bitField0_ |= 0x00000008;
} else {
ensureFloatDataIsMutable();
floatData_.addAll(other.floatData_);
}
onChanged();
}
if (!other.int32Data_.isEmpty()) {
if (int32Data_.isEmpty()) {
int32Data_ = other.int32Data_;
int32Data_.makeImmutable();
bitField0_ |= 0x00000010;
} else {
ensureInt32DataIsMutable();
int32Data_.addAll(other.int32Data_);
}
onChanged();
}
if (!other.stringData_.isEmpty()) {
if (stringData_.isEmpty()) {
stringData_ = other.stringData_;
stringData_.makeImmutable();
bitField0_ |= 0x00000020;
} else {
ensureStringDataIsMutable();
stringData_.addAll(other.stringData_);
}
onChanged();
}
if (!other.int64Data_.isEmpty()) {
if (int64Data_.isEmpty()) {
int64Data_ = other.int64Data_;
int64Data_.makeImmutable();
bitField0_ |= 0x00000040;
} else {
ensureInt64DataIsMutable();
int64Data_.addAll(other.int64Data_);
}
onChanged();
}
if (other.hasName()) {
name_ = other.name_;
bitField0_ |= 0x00000080;
onChanged();
}
if (other.hasDocString()) {
docString_ = other.docString_;
bitField0_ |= 0x00000100;
onChanged();
}
if (other.hasRawData()) {
setRawData(other.getRawData());
}
// external_data must honor both storage modes (plain list vs builder).
if (externalDataBuilder_ == null) {
if (!other.externalData_.isEmpty()) {
if (externalData_.isEmpty()) {
externalData_ = other.externalData_;
bitField0_ = (bitField0_ & ~0x00000400);
} else {
ensureExternalDataIsMutable();
externalData_.addAll(other.externalData_);
}
onChanged();
}
} else {
if (!other.externalData_.isEmpty()) {
if (externalDataBuilder_.isEmpty()) {
// Builder holds nothing: drop it and adopt other's list directly.
externalDataBuilder_.dispose();
externalDataBuilder_ = null;
externalData_ = other.externalData_;
bitField0_ = (bitField0_ & ~0x00000400);
externalDataBuilder_ =
com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
getExternalDataFieldBuilder() : null;
} else {
externalDataBuilder_.addAllMessages(other.externalData_);
}
}
}
if (other.hasDataLocation()) {
setDataLocation(other.getDataLocation());
}
if (!other.doubleData_.isEmpty()) {
if (doubleData_.isEmpty()) {
doubleData_ = other.doubleData_;
doubleData_.makeImmutable();
bitField0_ |= 0x00001000;
} else {
ensureDoubleDataIsMutable();
doubleData_.addAll(other.doubleData_);
}
onChanged();
}
if (!other.uint64Data_.isEmpty()) {
if (uint64Data_.isEmpty()) {
uint64Data_ = other.uint64Data_;
uint64Data_.makeImmutable();
bitField0_ |= 0x00002000;
} else {
ensureUint64DataIsMutable();
uint64Data_.addAll(other.uint64Data_);
}
onChanged();
}
this.mergeUnknownFields(other.getUnknownFields());
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
// No required fields in TensorProto, so a builder is always buildable.
return true;
}
// Wire-format parse loop. Each case label is a full tag value
// (field_number << 3 | wire_type); repeated numeric fields accept both the
// unpacked tag (one value per record) and the packed tag (length-delimited
// run of values), as required by the protobuf spec.
@java.lang.Override
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 8: {
long v = input.readInt64();
ensureDimsIsMutable();
dims_.addLong(v);
break;
} // case 8
case 10: {
int length = input.readRawVarint32();
int limit = input.pushLimit(length);
ensureDimsIsMutable();
while (input.getBytesUntilLimit() > 0) {
dims_.addLong(input.readInt64());
}
input.popLimit(limit);
break;
} // case 10
case 16: {
dataType_ = input.readInt32();
bitField0_ |= 0x00000002;
break;
} // case 16
case 26: {
input.readMessage(
getSegmentFieldBuilder().getBuilder(),
extensionRegistry);
bitField0_ |= 0x00000004;
break;
} // case 26
case 37: {
float v = input.readFloat();
ensureFloatDataIsMutable();
floatData_.addFloat(v);
break;
} // case 37
case 34: {
int length = input.readRawVarint32();
int limit = input.pushLimit(length);
// Cap the pre-allocation at 4096 bytes so a hostile length prefix
// cannot force a huge allocation before any data is read.
int alloc = length > 4096 ? 4096 : length;
ensureFloatDataIsMutable(alloc / 4);
while (input.getBytesUntilLimit() > 0) {
floatData_.addFloat(input.readFloat());
}
input.popLimit(limit);
break;
} // case 34
case 40: {
int v = input.readInt32();
ensureInt32DataIsMutable();
int32Data_.addInt(v);
break;
} // case 40
case 42: {
int length = input.readRawVarint32();
int limit = input.pushLimit(length);
ensureInt32DataIsMutable();
while (input.getBytesUntilLimit() > 0) {
int32Data_.addInt(input.readInt32());
}
input.popLimit(limit);
break;
} // case 42
case 50: {
com.google.protobuf.ByteString v = input.readBytes();
ensureStringDataIsMutable();
stringData_.add(v);
break;
} // case 50
case 56: {
long v = input.readInt64();
ensureInt64DataIsMutable();
int64Data_.addLong(v);
break;
} // case 56
case 58: {
int length = input.readRawVarint32();
int limit = input.pushLimit(length);
ensureInt64DataIsMutable();
while (input.getBytesUntilLimit() > 0) {
int64Data_.addLong(input.readInt64());
}
input.popLimit(limit);
break;
} // case 58
case 66: {
name_ = input.readBytes();
bitField0_ |= 0x00000080;
break;
} // case 66
case 74: {
rawData_ = input.readBytes();
bitField0_ |= 0x00000200;
break;
} // case 74
case 81: {
double v = input.readDouble();
ensureDoubleDataIsMutable();
doubleData_.addDouble(v);
break;
} // case 81
case 82: {
int length = input.readRawVarint32();
int limit = input.pushLimit(length);
int alloc = length > 4096 ? 4096 : length;
ensureDoubleDataIsMutable(alloc / 8);
while (input.getBytesUntilLimit() > 0) {
doubleData_.addDouble(input.readDouble());
}
input.popLimit(limit);
break;
} // case 82
case 88: {
long v = input.readUInt64();
ensureUint64DataIsMutable();
uint64Data_.addLong(v);
break;
} // case 88
case 90: {
int length = input.readRawVarint32();
int limit = input.pushLimit(length);
ensureUint64DataIsMutable();
while (input.getBytesUntilLimit() > 0) {
uint64Data_.addLong(input.readUInt64());
}
input.popLimit(limit);
break;
} // case 90
case 98: {
docString_ = input.readBytes();
bitField0_ |= 0x00000100;
break;
} // case 98
case 106: {
onnx.Onnx.StringStringEntryProto m =
input.readMessage(
onnx.Onnx.StringStringEntryProto.PARSER,
extensionRegistry);
if (externalDataBuilder_ == null) {
ensureExternalDataIsMutable();
externalData_.add(m);
} else {
externalDataBuilder_.addMessage(m);
}
break;
} // case 106
case 112: {
// Unknown enum numbers are preserved as unknown fields rather
// than dropped, per proto2 semantics.
int tmpRaw = input.readEnum();
onnx.Onnx.TensorProto.DataLocation tmpValue =
onnx.Onnx.TensorProto.DataLocation.forNumber(tmpRaw);
if (tmpValue == null) {
mergeUnknownVarintField(14, tmpRaw);
} else {
dataLocation_ = tmpRaw;
bitField0_ |= 0x00000800;
}
break;
} // case 112
default: {
if (!super.parseUnknownField(input, extensionRegistry, tag)) {
done = true; // was an endgroup tag
}
break;
} // default:
} // switch (tag)
} // while (!done)
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.unwrapIOException();
} finally {
onChanged();
} // finally
return this;
}
// Presence/mutability bits for the builder's fields (layout differs from
// the message's bitField0_; see buildPartial0).
private int bitField0_;
private com.google.protobuf.Internal.LongList dims_ = emptyLongList();
// Copy-on-write guard: dims_ may alias an immutable list shared with a
// merged-in message, so copy before the first mutation.
private void ensureDimsIsMutable() {
if (!dims_.isModifiable()) {
dims_ = makeMutableCopy(dims_);
}
bitField0_ |= 0x00000001;
}
/**
 * The shape of the tensor.
 *
 * <code>repeated int64 dims = 1;</code>
 * @return A list containing the dims.
 */
public java.util.List<java.lang.Long>
    getDimsList() {
  // Freeze the backing list so the returned view cannot be used to mutate
  // builder state behind the copy-on-write guard. (Generic type argument
  // restored; it had been stripped from the generated source.)
  dims_.makeImmutable();
  return dims_;
}
/**
 *
 * The shape of the tensor.
 *
 *
 * repeated int64 dims = 1;
 * @return The count of dims.
 */
public int getDimsCount() {
return dims_.size();
}
/**
 *
 * The shape of the tensor.
 *
 *
 * repeated int64 dims = 1;
 * @param index The index of the element to return.
 * @return The dims at the given index.
 */
public long getDims(int index) {
return dims_.getLong(index);
}
/**
 *
 * The shape of the tensor.
 *
 *
 * repeated int64 dims = 1;
 * @param index The index to set the value at.
 * @param value The dims to set.
 * @return This builder for chaining.
 */
public Builder setDims(
int index, long value) {
// Copy-on-write before mutating, then record presence and notify parent.
ensureDimsIsMutable();
dims_.setLong(index, value);
bitField0_ |= 0x00000001;
onChanged();
return this;
}
/**
 *
 * The shape of the tensor.
 *
 *
 * repeated int64 dims = 1;
 * @param value The dims to add.
 * @return This builder for chaining.
 */
public Builder addDims(long value) {
ensureDimsIsMutable();
dims_.addLong(value);
bitField0_ |= 0x00000001;
onChanged();
return this;
}
/**
*
* The shape of the tensor.
*
*
* repeated int64 dims = 1;
* @param values The dims to add.
* @return This builder for chaining.
*/
public Builder addAllDims(
java.lang.Iterable extends java.lang.Long> values) {
ensureDimsIsMutable();
com.google.protobuf.AbstractMessageLite.Builder.addAll(
values, dims_);
bitField0_ |= 0x00000001;
onChanged();
return this;
}
/**
 *
 * The shape of the tensor.
 *
 *
 * repeated int64 dims = 1;
 * @return This builder for chaining.
 */
public Builder clearDims() {
// Drop the list entirely and clear the dims presence/mutability bit.
dims_ = emptyLongList();
bitField0_ = (bitField0_ & ~0x00000001);
onChanged();
return this;
}
// Raw int holding a TensorProto.DataType enum number (stored as int32 on
// the wire so unknown future values round-trip).
private int dataType_ ;
/**
 *
 * The data type of the tensor.
 * This field MUST have a valid TensorProto.DataType value
 *
 *
 * optional int32 data_type = 2;
 * @return Whether the dataType field is set.
 */
@java.lang.Override
public boolean hasDataType() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
 *
 * The data type of the tensor.
 * This field MUST have a valid TensorProto.DataType value
 *
 *
 * optional int32 data_type = 2;
 * @return The dataType.
 */
@java.lang.Override
public int getDataType() {
return dataType_;
}
/**
 *
 * The data type of the tensor.
 * This field MUST have a valid TensorProto.DataType value
 *
 *
 * optional int32 data_type = 2;
 * @param value The dataType to set.
 * @return This builder for chaining.
 */
public Builder setDataType(int value) {
dataType_ = value;
bitField0_ |= 0x00000002;
onChanged();
return this;
}
/**
 * For float and complex64 values
 * Complex64 tensors are encoded as a single array of floats,
 * with the real components appearing in odd numbered positions,
 * and the corresponding imaginary component appearing in the
 * subsequent even numbered position. (e.g., [1.0 + 2.0i, 3.0 + 4.0i]
 * is encoded as [1.0, 2.0, 3.0, 4.0])
 * When this field is present, the data_type field MUST be FLOAT or COMPLEX64.
 *
 * <code>repeated float float_data = 4 [packed = true];</code>
 * @return A list containing the floatData.
 */
public java.util.List<java.lang.Float>
    getFloatDataList() {
  // NOTE(review): the javadoc here had a fragment of the data_type field's
  // comment fused onto it (extraction damage); it has been restored to the
  // float_data comment only. The generic type argument was also restored.
  // The clearDataType() method and floatData_ field declaration that the
  // generated file normally places before this getter are absent from this
  // chunk — confirm against the full file.
  floatData_.makeImmutable();
  return floatData_;
}
/**
 *
 * For float and complex64 values
 * Complex64 tensors are encoded as a single array of floats,
 * with the real components appearing in odd numbered positions,
 * and the corresponding imaginary component appearing in the
 * subsequent even numbered position. (e.g., [1.0 + 2.0i, 3.0 + 4.0i]
 * is encoded as [1.0, 2.0 ,3.0 ,4.0]
 * When this field is present, the data_type field MUST be FLOAT or COMPLEX64.
 *
 *
 * repeated float float_data = 4 [packed = true];
 * @return The count of floatData.
 */
public int getFloatDataCount() {
return floatData_.size();
}
/**
 *
 * For float and complex64 values
 * Complex64 tensors are encoded as a single array of floats,
 * with the real components appearing in odd numbered positions,
 * and the corresponding imaginary component appearing in the
 * subsequent even numbered position. (e.g., [1.0 + 2.0i, 3.0 + 4.0i]
 * is encoded as [1.0, 2.0 ,3.0 ,4.0]
 * When this field is present, the data_type field MUST be FLOAT or COMPLEX64.
 *
 *
 * repeated float float_data = 4 [packed = true];
 * @param index The index of the element to return.
 * @return The floatData at the given index.
 */
public float getFloatData(int index) {
return floatData_.getFloat(index);
}
/**
 *
 * For float and complex64 values
 * Complex64 tensors are encoded as a single array of floats,
 * with the real components appearing in odd numbered positions,
 * and the corresponding imaginary component appearing in the
 * subsequent even numbered position. (e.g., [1.0 + 2.0i, 3.0 + 4.0i]
 * is encoded as [1.0, 2.0 ,3.0 ,4.0]
 * When this field is present, the data_type field MUST be FLOAT or COMPLEX64.
 *
 *
 * repeated float float_data = 4 [packed = true];
 * @param index The index to set the value at.
 * @param value The floatData to set.
 * @return This builder for chaining.
 */
public Builder setFloatData(
int index, float value) {
// Copy-on-write before mutating, then record presence and notify parent.
ensureFloatDataIsMutable();
floatData_.setFloat(index, value);
bitField0_ |= 0x00000008;
onChanged();
return this;
}
/**
 *
 * For float and complex64 values
 * Complex64 tensors are encoded as a single array of floats,
 * with the real components appearing in odd numbered positions,
 * and the corresponding imaginary component appearing in the
 * subsequent even numbered position. (e.g., [1.0 + 2.0i, 3.0 + 4.0i]
 * is encoded as [1.0, 2.0 ,3.0 ,4.0]
 * When this field is present, the data_type field MUST be FLOAT or COMPLEX64.
 *
 *
 * repeated float float_data = 4 [packed = true];
 * @param value The floatData to add.
 * @return This builder for chaining.
 */
public Builder addFloatData(float value) {
ensureFloatDataIsMutable();
floatData_.addFloat(value);
bitField0_ |= 0x00000008;
onChanged();
return this;
}
/**
*
* For float and complex64 values
* Complex64 tensors are encoded as a single array of floats,
* with the real components appearing in odd numbered positions,
* and the corresponding imaginary component appearing in the
* subsequent even numbered position. (e.g., [1.0 + 2.0i, 3.0 + 4.0i]
* is encoded as [1.0, 2.0 ,3.0 ,4.0]
* When this field is present, the data_type field MUST be FLOAT or COMPLEX64.
*
*
* repeated float float_data = 4 [packed = true];
* @param values The floatData to add.
* @return This builder for chaining.
*/
public Builder addAllFloatData(
java.lang.Iterable extends java.lang.Float> values) {
ensureFloatDataIsMutable();
com.google.protobuf.AbstractMessageLite.Builder.addAll(
values, floatData_);
bitField0_ |= 0x00000008;
onChanged();
return this;
}
/**
 * Returns the repeated {@code int32_data} field as an (immutable) list.
 *
 * For int32, uint8, int8, uint16, int16, bool, float8, and float16 values.
 * float16 and float8 values must be bit-wise converted to an uint16_t prior
 * to writing to the buffer. When this field is present, the data_type field
 * MUST be INT32, INT16, INT8, UINT16, UINT8, BOOL, FLOAT16, BFLOAT16,
 * FLOAT8E4M3FN, FLOAT8E4M3FNUZ, FLOAT8E5M2, or FLOAT8E5M2FNUZ.
 *
 * <code>repeated int32 int32_data = 5 [packed = true];</code>
 * @return A list containing the int32Data.
 */
public java.util.List<java.lang.Integer>
    // Restored the <java.lang.Integer> type argument stripped from the
    // generated return type (raw List otherwise).
    getInt32DataList() {
  int32Data_.makeImmutable();
  return int32Data_;
}
/**
 * Returns the number of elements in the repeated {@code int32_data} field.
 *
 * <code>repeated int32 int32_data = 5 [packed = true];</code>
 * @return The count of int32Data.
 */
public int getInt32DataCount() {
  return int32Data_.size();
}
/**
 * Returns the element at {@code index} of the repeated {@code int32_data} field.
 *
 * <code>repeated int32 int32_data = 5 [packed = true];</code>
 * @param index The index of the element to return.
 * @return The int32Data at the given index.
 */
public int getInt32Data(int index) {
  return int32Data_.getInt(index);
}
/**
 * Sets the element at {@code index} of the repeated {@code int32_data} field.
 *
 * <code>repeated int32 int32_data = 5 [packed = true];</code>
 * @param index The index to set the value at.
 * @param value The int32Data to set.
 * @return This builder for chaining.
 */
public Builder setInt32Data(
    int index, int value) {
  ensureInt32DataIsMutable();
  int32Data_.setInt(index, value);
  bitField0_ |= 0x00000010;  // mark int32_data present
  onChanged();
  return this;
}
/**
 * Appends one value to the repeated {@code int32_data} field.
 *
 * <code>repeated int32 int32_data = 5 [packed = true];</code>
 * @param value The int32Data to add.
 * @return This builder for chaining.
 */
public Builder addInt32Data(int value) {
  ensureInt32DataIsMutable();
  int32Data_.addInt(value);
  bitField0_ |= 0x00000010;
  onChanged();
  return this;
}
/**
*
* For int32, uint8, int8, uint16, int16, bool, float8, and float16 values
* float16 and float8 values must be bit-wise converted to an uint16_t prior
* to writing to the buffer.
* When this field is present, the data_type field MUST be
* INT32, INT16, INT8, UINT16, UINT8, BOOL, FLOAT16, BFLOAT16, FLOAT8E4M3FN, FLOAT8E4M3FNUZ, FLOAT8E5M2, FLOAT8E5M2FNUZ
*
*
* repeated int32 int32_data = 5 [packed = true];
* @param values The int32Data to add.
* @return This builder for chaining.
*/
public Builder addAllInt32Data(
java.lang.Iterable extends java.lang.Integer> values) {
ensureInt32DataIsMutable();
com.google.protobuf.AbstractMessageLite.Builder.addAll(
values, int32Data_);
bitField0_ |= 0x00000010;
onChanged();
return this;
}
/**
 * Returns the repeated {@code string_data} field as an (immutable) list.
 *
 * For strings. Each element of string_data is a UTF-8 encoded Unicode
 * string. No trailing null, no leading BOM. The protobuf "string" scalar
 * type is not used to match ML community conventions. When this field is
 * present, the data_type field MUST be STRING.
 *
 * <code>repeated bytes string_data = 6;</code>
 * @return A list containing the stringData.
 */
public java.util.List<com.google.protobuf.ByteString>
    // Restored the <ByteString> type argument stripped from the
    // generated return type (raw List otherwise).
    getStringDataList() {
  stringData_.makeImmutable();
  return stringData_;
}
/**
 * Returns the number of elements in the repeated {@code string_data} field.
 *
 * <code>repeated bytes string_data = 6;</code>
 * @return The count of stringData.
 */
public int getStringDataCount() {
  return stringData_.size();
}
/**
 * Returns the element at {@code index} of the repeated {@code string_data} field.
 *
 * <code>repeated bytes string_data = 6;</code>
 * @param index The index of the element to return.
 * @return The stringData at the given index.
 */
public com.google.protobuf.ByteString getStringData(int index) {
  return stringData_.get(index);
}
/**
 * Sets the element at {@code index} of the repeated {@code string_data} field.
 *
 * <code>repeated bytes string_data = 6;</code>
 * @param index The index to set the value at.
 * @param value The stringData to set; must not be null.
 * @return This builder for chaining.
 */
public Builder setStringData(
    int index, com.google.protobuf.ByteString value) {
  if (value == null) { throw new NullPointerException(); }
  ensureStringDataIsMutable();
  stringData_.set(index, value);
  bitField0_ |= 0x00000020;  // mark string_data present
  onChanged();
  return this;
}
/**
 * Appends one value to the repeated {@code string_data} field.
 *
 * <code>repeated bytes string_data = 6;</code>
 * @param value The stringData to add; must not be null.
 * @return This builder for chaining.
 */
public Builder addStringData(com.google.protobuf.ByteString value) {
  if (value == null) { throw new NullPointerException(); }
  ensureStringDataIsMutable();
  stringData_.add(value);
  bitField0_ |= 0x00000020;
  onChanged();
  return this;
}
/**
*
* For strings.
* Each element of string_data is a UTF-8 encoded Unicode
* string. No trailing null, no leading BOM. The protobuf "string"
* scalar type is not used to match ML community conventions.
* When this field is present, the data_type field MUST be STRING
*
*
* repeated bytes string_data = 6;
* @param values The stringData to add.
* @return This builder for chaining.
*/
public Builder addAllStringData(
java.lang.Iterable extends com.google.protobuf.ByteString> values) {
ensureStringDataIsMutable();
com.google.protobuf.AbstractMessageLite.Builder.addAll(
values, stringData_);
bitField0_ |= 0x00000020;
onChanged();
return this;
}
/**
 * Returns the repeated {@code int64_data} field as an (immutable) list.
 *
 * For int64. When this field is present, the data_type field MUST be INT64.
 *
 * <code>repeated int64 int64_data = 7 [packed = true];</code>
 * @return A list containing the int64Data.
 */
public java.util.List<java.lang.Long>
    // Restored the <java.lang.Long> type argument stripped from the
    // generated return type (raw List otherwise).
    getInt64DataList() {
  int64Data_.makeImmutable();
  return int64Data_;
}
/**
 * Returns the number of elements in the repeated {@code int64_data} field.
 *
 * <code>repeated int64 int64_data = 7 [packed = true];</code>
 * @return The count of int64Data.
 */
public int getInt64DataCount() {
  return int64Data_.size();
}
/**
 * Returns the element at {@code index} of the repeated {@code int64_data} field.
 *
 * <code>repeated int64 int64_data = 7 [packed = true];</code>
 * @param index The index of the element to return.
 * @return The int64Data at the given index.
 */
public long getInt64Data(int index) {
  return int64Data_.getLong(index);
}
/**
 * Sets the element at {@code index} of the repeated {@code int64_data} field.
 *
 * <code>repeated int64 int64_data = 7 [packed = true];</code>
 * @param index The index to set the value at.
 * @param value The int64Data to set.
 * @return This builder for chaining.
 */
public Builder setInt64Data(
    int index, long value) {
  ensureInt64DataIsMutable();
  int64Data_.setLong(index, value);
  bitField0_ |= 0x00000040;  // mark int64_data present
  onChanged();
  return this;
}
/**
 * Appends one value to the repeated {@code int64_data} field.
 *
 * <code>repeated int64 int64_data = 7 [packed = true];</code>
 * @param value The int64Data to add.
 * @return This builder for chaining.
 */
public Builder addInt64Data(long value) {
  ensureInt64DataIsMutable();
  int64Data_.addLong(value);
  bitField0_ |= 0x00000040;
  onChanged();
  return this;
}
/**
*
* For int64.
* When this field is present, the data_type field MUST be INT64
*
*
* repeated int64 int64_data = 7 [packed = true];
* @param values The int64Data to add.
* @return This builder for chaining.
*/
public Builder addAllInt64Data(
java.lang.Iterable extends java.lang.Long> values) {
ensureInt64DataIsMutable();
com.google.protobuf.AbstractMessageLite.Builder.addAll(
values, int64Data_);
bitField0_ |= 0x00000040;
onChanged();
return this;
}
/**
 * Optionally, a name for the tensor.
 *
 * <code>optional string name = 8;</code>
 * @return Whether the name field is set.
 */
public boolean hasName() {
  return ((bitField0_ & 0x00000080) != 0);
}
/**
 * Optionally, a name for the tensor.
 *
 * <code>optional string name = 8;</code>
 * @return The name.
 */
public java.lang.String getName() {
  java.lang.Object ref = name_;
  if (!(ref instanceof java.lang.String)) {
    // Field is still stored as raw bytes: decode lazily, and cache the
    // decoded String only when the bytes are valid UTF-8 (proto2 semantics).
    com.google.protobuf.ByteString bs =
        (com.google.protobuf.ByteString) ref;
    java.lang.String s = bs.toStringUtf8();
    if (bs.isValidUtf8()) {
      name_ = s;
    }
    return s;
  } else {
    return (java.lang.String) ref;
  }
}
/**
 * Optionally, a name for the tensor.
 *
 * <code>optional string name = 8;</code>
 * @return The bytes for name.
 */
public com.google.protobuf.ByteString
    getNameBytes() {
  java.lang.Object ref = name_;
  if (ref instanceof String) {
    // Field currently cached as a String: encode to UTF-8 bytes and
    // cache the ByteString representation.
    com.google.protobuf.ByteString b =
        com.google.protobuf.ByteString.copyFromUtf8(
            (java.lang.String) ref);
    name_ = b;
    return b;
  } else {
    return (com.google.protobuf.ByteString) ref;
  }
}
/**
 * Optionally, a name for the tensor.
 *
 * <code>optional string name = 8;</code>
 * @param value The name to set; must not be null.
 * @return This builder for chaining.
 */
public Builder setName(
    java.lang.String value) {
  if (value == null) { throw new NullPointerException(); }
  name_ = value;
  bitField0_ |= 0x00000080;  // mark name present
  onChanged();
  return this;
}
/**
 * Clears the {@code name} field back to its default value.
 *
 * <code>optional string name = 8;</code>
 * @return This builder for chaining.
 */
public Builder clearName() {
  name_ = getDefaultInstance().getName();
  bitField0_ = (bitField0_ & ~0x00000080);  // clear has-bit
  onChanged();
  return this;
}
/**
 * Sets the {@code name} field from raw bytes.
 *
 * <code>optional string name = 8;</code>
 * @param value The bytes for name to set; must not be null.
 * @return This builder for chaining.
 */
public Builder setNameBytes(
    com.google.protobuf.ByteString value) {
  if (value == null) { throw new NullPointerException(); }
  name_ = value;
  bitField0_ |= 0x00000080;
  onChanged();
  return this;
}
// Stored as Object: either a cached String or a ByteString (lazy UTF-8 decode).
private java.lang.Object docString_ = "";
/**
 * A human-readable documentation for this tensor. Markdown is allowed.
 *
 * <code>optional string doc_string = 12;</code>
 * @return Whether the docString field is set.
 */
public boolean hasDocString() {
  return ((bitField0_ & 0x00000100) != 0);
}
/**
 * Sets the {@code doc_string} field from raw bytes.
 *
 * A human-readable documentation for this tensor. Markdown is allowed.
 *
 * <code>optional string doc_string = 12;</code>
 * @param value The bytes for docString to set; must not be null.
 * @return This builder for chaining.
 */
public Builder setDocStringBytes(
    com.google.protobuf.ByteString value) {
  if (value == null) { throw new NullPointerException(); }
  docString_ = value;
  bitField0_ |= 0x00000100;  // mark doc_string present
  onChanged();
  return this;
}
private com.google.protobuf.ByteString rawData_ = com.google.protobuf.ByteString.EMPTY;
/**
 * Serializations can either use one of the type-specific fields above, or use
 * this raw bytes field. The only exception is the string case, where one is
 * required to store the content in the repeated bytes string_data field.
 *
 * When raw_data is used to store the tensor value, elements MUST be stored in
 * fixed-width, little-endian order; floating-point types in IEEE 754 format;
 * complex64/complex128 as two consecutive FLOAT/DOUBLE values (real first);
 * booleans as one byte per element (0x01 true, 0x00 false).
 *
 * Note: the advantage of a type-specific field over raw_data is that protobuf
 * may pack it better (e.g. varint for int data), giving a smaller binary.
 * When this field is present, the data_type field MUST NOT be STRING or UNDEFINED.
 *
 * <code>optional bytes raw_data = 9;</code>
 * @return Whether the rawData field is set.
 */
@java.lang.Override
public boolean hasRawData() {
  return ((bitField0_ & 0x00000200) != 0);
}
/**
 * Sets the {@code raw_data} field.
 *
 * Serializations can either use one of the type-specific fields above, or use
 * this raw bytes field. The only exception is the string case, where one is
 * required to store the content in the repeated bytes string_data field.
 *
 * When raw_data is used to store the tensor value, elements MUST be stored in
 * fixed-width, little-endian order; floating-point types in IEEE 754 format;
 * complex64/complex128 as two consecutive FLOAT/DOUBLE values (real first);
 * booleans as one byte per element (0x01 true, 0x00 false).
 * When this field is present, the data_type field MUST NOT be STRING or UNDEFINED.
 *
 * <code>optional bytes raw_data = 9;</code>
 * @param value The rawData to set; must not be null.
 * @return This builder for chaining.
 */
public Builder setRawData(com.google.protobuf.ByteString value) {
  if (value == null) { throw new NullPointerException(); }
  rawData_ = value;
  bitField0_ |= 0x00000200;  // mark raw_data present
  onChanged();
  return this;
}
/**
 * Returns the number of entries in the repeated {@code external_data} field.
 *
 * Data can be stored inside the protobuf file using type-specific fields or
 * raw_data. Alternatively, raw bytes data can be stored in an external file,
 * using the external_data field, which stores key-value pairs describing the
 * data location. Recognized keys are:
 * - "location" (required): POSIX path relative to the directory where the
 *   ONNX protobuf model was stored.
 * - "offset" (optional): byte position at which stored data begins; integer
 *   stored as string. SHOULD be a multiple of 4096 to enable mmap support.
 * - "length" (optional): number of bytes containing data; integer as string.
 * - "checksum" (optional): SHA1 digest of the file under the 'location' key.
 *
 * <code>repeated .onnx.StringStringEntryProto external_data = 13;</code>
 */
public int getExternalDataCount() {
  // Once a field builder exists it owns the list; delegate to it.
  if (externalDataBuilder_ == null) {
    return externalData_.size();
  } else {
    return externalDataBuilder_.getCount();
  }
}
/**
 * Replaces the entry at {@code index} of the repeated {@code external_data}
 * field (key-value pairs describing externally stored tensor data; see
 * {@code getExternalDataCount} for the recognized keys).
 *
 * <code>repeated .onnx.StringStringEntryProto external_data = 13;</code>
 * @param index The index to set the value at.
 * @param value The entry to set; must not be null.
 * @return This builder for chaining.
 */
public Builder setExternalData(
    int index, onnx.Onnx.StringStringEntryProto value) {
  if (externalDataBuilder_ == null) {
    if (value == null) {
      throw new NullPointerException();
    }
    ensureExternalDataIsMutable();
    externalData_.set(index, value);
    onChanged();
  } else {
    // Builder present: it owns the list, so mutate through it.
    externalDataBuilder_.setMessage(index, value);
  }
  return this;
}
/**
 * Appends one entry to the repeated {@code external_data} field (key-value
 * pairs describing externally stored tensor data; see
 * {@code getExternalDataCount} for the recognized keys).
 *
 * <code>repeated .onnx.StringStringEntryProto external_data = 13;</code>
 * @param value The entry to add; must not be null.
 * @return This builder for chaining.
 */
public Builder addExternalData(onnx.Onnx.StringStringEntryProto value) {
  if (externalDataBuilder_ == null) {
    if (value == null) {
      throw new NullPointerException();
    }
    ensureExternalDataIsMutable();
    externalData_.add(value);
    onChanged();
  } else {
    externalDataBuilder_.addMessage(value);
  }
  return this;
}
/**
 * Inserts one entry at {@code index} in the repeated {@code external_data}
 * field (key-value pairs describing externally stored tensor data; see
 * {@code getExternalDataCount} for the recognized keys).
 *
 * <code>repeated .onnx.StringStringEntryProto external_data = 13;</code>
 * @param index The index to insert at.
 * @param value The entry to add; must not be null.
 * @return This builder for chaining.
 */
public Builder addExternalData(
    int index, onnx.Onnx.StringStringEntryProto value) {
  if (externalDataBuilder_ == null) {
    if (value == null) {
      throw new NullPointerException();
    }
    ensureExternalDataIsMutable();
    externalData_.add(index, value);
    onChanged();
  } else {
    externalDataBuilder_.addMessage(index, value);
  }
  return this;
}
/**
 * Returns a builder for the entry at {@code index} of the repeated
 * {@code external_data} field (key-value pairs describing externally stored
 * tensor data; see {@code getExternalDataCount} for the recognized keys).
 * Forces creation of the repeated field builder.
 *
 * <code>repeated .onnx.StringStringEntryProto external_data = 13;</code>
 */
public onnx.Onnx.StringStringEntryProto.Builder getExternalDataBuilder(
    int index) {
  return getExternalDataFieldBuilder().getBuilder(index);
}
/**
 * Returns the entry (message or builder view) at {@code index} of the
 * repeated {@code external_data} field without forcing builder creation.
 *
 * <code>repeated .onnx.StringStringEntryProto external_data = 13;</code>
 */
public onnx.Onnx.StringStringEntryProtoOrBuilder getExternalDataOrBuilder(
    int index) {
  if (externalDataBuilder_ == null) {
    return externalData_.get(index); } else {
    return externalDataBuilder_.getMessageOrBuilder(index);
  }
}
/**
*
* Data can be stored inside the protobuf file using type-specific fields or raw_data.
* Alternatively, raw bytes data can be stored in an external file, using the external_data field.
* external_data stores key-value pairs describing data location. Recognized keys are:
* - "location" (required) - POSIX filesystem path relative to the directory where the ONNX
* protobuf model was stored
* - "offset" (optional) - position of byte at which stored data begins. Integer stored as string.
* Offset values SHOULD be multiples 4096 (page size) to enable mmap support.
* - "length" (optional) - number of bytes containing data. Integer stored as string.
* - "checksum" (optional) - SHA1 digest of file specified in under 'location' key.
*
* Data can be stored inside the protobuf file using type-specific fields or raw_data.
* Alternatively, raw bytes data can be stored in an external file, using the external_data field.
* external_data stores key-value pairs describing data location. Recognized keys are:
* - "location" (required) - POSIX filesystem path relative to the directory where the ONNX
* protobuf model was stored
* - "offset" (optional) - position of byte at which stored data begins. Integer stored as string.
* Offset values SHOULD be multiples 4096 (page size) to enable mmap support.
* - "length" (optional) - number of bytes containing data. Integer stored as string.
* - "checksum" (optional) - SHA1 digest of file specified in under 'location' key.
*
* Data can be stored inside the protobuf file using type-specific fields or raw_data.
* Alternatively, raw bytes data can be stored in an external file, using the external_data field.
* external_data stores key-value pairs describing data location. Recognized keys are:
* - "location" (required) - POSIX filesystem path relative to the directory where the ONNX
* protobuf model was stored
* - "offset" (optional) - position of byte at which stored data begins. Integer stored as string.
* Offset values SHOULD be multiples 4096 (page size) to enable mmap support.
* - "length" (optional) - number of bytes containing data. Integer stored as string.
* - "checksum" (optional) - SHA1 digest of file specified in under 'location' key.
*
* Data can be stored inside the protobuf file using type-specific fields or raw_data.
* Alternatively, raw bytes data can be stored in an external file, using the external_data field.
* external_data stores key-value pairs describing data location. Recognized keys are:
* - "location" (required) - POSIX filesystem path relative to the directory where the ONNX
* protobuf model was stored
* - "offset" (optional) - position of byte at which stored data begins. Integer stored as string.
* Offset values SHOULD be multiples 4096 (page size) to enable mmap support.
* - "length" (optional) - number of bytes containing data. Integer stored as string.
* - "checksum" (optional) - SHA1 digest of file specified in under 'location' key.
*
* For double
* Complex128 tensors are encoded as a single array of doubles,
* with the real components appearing in odd numbered positions,
* and the corresponding imaginary component appearing in the
* subsequent even numbered position. (e.g., [1.0 + 2.0i, 3.0 + 4.0i]
* is encoded as [1.0, 2.0 ,3.0 ,4.0]
* When this field is present, the data_type field MUST be DOUBLE or COMPLEX128
*
*
* repeated double double_data = 10 [packed = true];
* @return A list containing the doubleData.
*/
/**
 * Returns the (immutable) contents of the repeated double_data field.
 * For COMPLEX128 tensors the entries are (real, imaginary) pairs.
 *
 * <code>repeated double double_data = 10 [packed = true];</code>
 * @return A list containing the doubleData.
 */
public java.util.List<java.lang.Double>
    getDoubleDataList() {
  // Freeze the backing list so the returned view cannot be mutated later.
  doubleData_.makeImmutable();
  return doubleData_;
}
/**
 * Number of entries in the repeated double_data field
 * (<code>repeated double double_data = 10 [packed = true];</code>).
 * For DOUBLE tensors each entry is one element; for COMPLEX128 tensors
 * entries come in (real, imaginary) pairs.
 *
 * @return The count of doubleData.
 */
public int getDoubleDataCount() {
  final int elementCount = doubleData_.size();
  return elementCount;
}
/**
 * Element accessor for the repeated double_data field
 * (<code>repeated double double_data = 10 [packed = true];</code>).
 *
 * @param index The index of the element to return.
 * @return The doubleData at the given index.
 */
public double getDoubleData(int index) {
  // Read straight from the primitive-backed list; no boxing occurs.
  final double element = doubleData_.getDouble(index);
  return element;
}
/**
 * Replaces the element at {@code index} in the repeated double_data field.
 *
 * <code>repeated double double_data = 10 [packed = true];</code>
 * @param index The index to set the value at.
 * @param value The doubleData to set.
 * @return This builder for chaining.
 */
public Builder setDoubleData(
    int index, double value) {
  // Copy-on-write: the backing list may be shared with a built message.
  ensureDoubleDataIsMutable();
  doubleData_.setDouble(index, value);
  bitField0_ |= 0x00001000;  // mark double_data as set
  onChanged();
  return this;
}
/**
 * Appends one value to the repeated double_data field.
 *
 * <code>repeated double double_data = 10 [packed = true];</code>
 * @param value The doubleData to add.
 * @return This builder for chaining.
 */
public Builder addDoubleData(double value) {
  ensureDoubleDataIsMutable();
  doubleData_.addDouble(value);
  bitField0_ |= 0x00001000;  // mark double_data as set
  onChanged();
  return this;
}
/**
*
* For double
* Complex128 tensors are encoded as a single array of doubles,
* with the real components appearing in odd numbered positions,
* and the corresponding imaginary component appearing in the
* subsequent even numbered position. (e.g., [1.0 + 2.0i, 3.0 + 4.0i]
* is encoded as [1.0, 2.0 ,3.0 ,4.0]
* When this field is present, the data_type field MUST be DOUBLE or COMPLEX128
*
*
* repeated double double_data = 10 [packed = true];
* @param values The doubleData to add.
* @return This builder for chaining.
*/
public Builder addAllDoubleData(
java.lang.Iterable extends java.lang.Double> values) {
ensureDoubleDataIsMutable();
com.google.protobuf.AbstractMessageLite.Builder.addAll(
values, doubleData_);
bitField0_ |= 0x00001000;
onChanged();
return this;
}
/**
 * Returns the (immutable) contents of the repeated uint64_data field.
 * When this field is present, the data_type field MUST be UINT32 or UINT64.
 *
 * <code>repeated uint64 uint64_data = 11 [packed = true];</code>
 * @return A list containing the uint64Data.
 */
public java.util.List<java.lang.Long>
    getUint64DataList() {
  // Freeze the backing list so the returned view cannot be mutated later.
  uint64Data_.makeImmutable();
  return uint64Data_;
}
/**
 * Number of entries in the repeated uint64_data field
 * (<code>repeated uint64 uint64_data = 11 [packed = true];</code>).
 *
 * @return The count of uint64Data.
 */
public int getUint64DataCount() {
  final int elementCount = uint64Data_.size();
  return elementCount;
}
/**
 * Element accessor for the repeated uint64_data field
 * (<code>repeated uint64 uint64_data = 11 [packed = true];</code>).
 * Values are unsigned 64-bit integers carried in Java longs.
 *
 * @param index The index of the element to return.
 * @return The uint64Data at the given index.
 */
public long getUint64Data(int index) {
  final long element = uint64Data_.getLong(index);
  return element;
}
/**
 * Replaces the element at {@code index} in the repeated uint64_data field.
 *
 * <code>repeated uint64 uint64_data = 11 [packed = true];</code>
 * @param index The index to set the value at.
 * @param value The uint64Data to set.
 * @return This builder for chaining.
 */
public Builder setUint64Data(
    int index, long value) {
  // Copy-on-write: the backing list may be shared with a built message.
  ensureUint64DataIsMutable();
  uint64Data_.setLong(index, value);
  bitField0_ |= 0x00002000;  // mark uint64_data as set
  onChanged();
  return this;
}
/**
 * Appends one value to the repeated uint64_data field.
 *
 * <code>repeated uint64 uint64_data = 11 [packed = true];</code>
 * @param value The uint64Data to add.
 * @return This builder for chaining.
 */
public Builder addUint64Data(long value) {
  ensureUint64DataIsMutable();
  uint64Data_.addLong(value);
  bitField0_ |= 0x00002000;  // mark uint64_data as set
  onChanged();
  return this;
}
/**
*
* For uint64 and uint32 values
* When this field is present, the data_type field MUST be
* UINT32 or UINT64
*
*
* repeated uint64 uint64_data = 11 [packed = true];
* @param values The uint64Data to add.
* @return This builder for chaining.
*/
public Builder addAllUint64Data(
java.lang.Iterable extends java.lang.Long> values) {
ensureUint64DataIsMutable();
com.google.protobuf.AbstractMessageLite.Builder.addAll(
values, uint64Data_);
bitField0_ |= 0x00002000;
onChanged();
return this;
}
/**
 * Clears the repeated uint64_data field back to its empty default.
 *
 * <code>repeated uint64 uint64_data = 11 [packed = true];</code>
 * @return This builder for chaining.
 */
public Builder clearUint64Data() {
  uint64Data_ = emptyLongList();
  bitField0_ = (bitField0_ & ~0x00002000);  // drop the has-bit for uint64_data
  onChanged();
  return this;
}
// Standard unknown-field plumbing: delegate to the generated superclass.
@java.lang.Override
public final Builder setUnknownFields(
    final com.google.protobuf.UnknownFieldSet unknownFields) {
  return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
    final com.google.protobuf.UnknownFieldSet unknownFields) {
  return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:onnx.TensorProto)
}
// @@protoc_insertion_point(class_scope:onnx.TensorProto)
// Singleton default instance: the canonical "all fields unset" TensorProto.
private static final onnx.Onnx.TensorProto DEFAULT_INSTANCE;
static {
  DEFAULT_INSTANCE = new onnx.Onnx.TensorProto();
}
public static onnx.Onnx.TensorProto getDefaultInstance() {
  return DEFAULT_INSTANCE;
}
/**
 * Wire-format parser for TensorProto. Deprecated in favor of {@link #parser()}.
 * On failure the partially parsed message is attached to the thrown
 * InvalidProtocolBufferException as the unfinished message.
 */
@java.lang.Deprecated public static final com.google.protobuf.Parser<TensorProto>
    PARSER = new com.google.protobuf.AbstractParser<TensorProto>() {
  @java.lang.Override
  public TensorProto parsePartialFrom(
      com.google.protobuf.CodedInputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    Builder builder = newBuilder();
    try {
      builder.mergeFrom(input, extensionRegistry);
    } catch (com.google.protobuf.InvalidProtocolBufferException e) {
      throw e.setUnfinishedMessage(builder.buildPartial());
    } catch (com.google.protobuf.UninitializedMessageException e) {
      throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
    } catch (java.io.IOException e) {
      // Wrap plain I/O errors so callers see a protobuf-specific exception.
      throw new com.google.protobuf.InvalidProtocolBufferException(e)
          .setUnfinishedMessage(builder.buildPartial());
    }
    return builder.buildPartial();
  }
};
/** Preferred accessor for the TensorProto wire-format parser. */
public static com.google.protobuf.Parser<TensorProto> parser() {
  return PARSER;
}
@java.lang.Override
public com.google.protobuf.Parser<TensorProto> getParserForType() {
  return PARSER;
}
@java.lang.Override
public onnx.Onnx.TensorProto getDefaultInstanceForType() {
  return DEFAULT_INSTANCE;
}
}
/**
 * Read-only accessor interface shared by SparseTensorProto and its Builder.
 * NOTE(review): the extracted source had lost the getValues()/getValuesOrBuilder()/
 * getIndicesOrBuilder() declarations (and all generic type arguments); they are
 * restored here following the standard protoc Java codegen pattern used by the
 * implementing class below.
 */
public interface SparseTensorProtoOrBuilder extends
    // @@protoc_insertion_point(interface_extends:onnx.SparseTensorProto)
    com.google.protobuf.MessageOrBuilder {
  /**
   * <pre>
   * The sequence of non-default values are encoded as a tensor of shape [NNZ].
   * The default-value is zero for numeric tensors, and empty-string for string tensors.
   * values must have a non-empty name present which serves as a name for SparseTensorProto
   * when used in sparse_initializer list.
   * </pre>
   *
   * <code>optional .onnx.TensorProto values = 1;</code>
   * @return Whether the values field is set.
   */
  boolean hasValues();
  /**
   * <code>optional .onnx.TensorProto values = 1;</code>
   * @return The values.
   */
  onnx.Onnx.TensorProto getValues();
  /**
   * <code>optional .onnx.TensorProto values = 1;</code>
   */
  onnx.Onnx.TensorProtoOrBuilder getValuesOrBuilder();
  /**
   * <pre>
   * The indices of the non-default values, which may be stored in one of two formats.
   * (a) Indices can be a tensor of shape [NNZ, rank] with the [i,j]-th value
   * corresponding to the j-th index of the i-th value (in the values tensor).
   * (b) Indices can be a tensor of shape [NNZ], in which case the i-th value
   * must be the linearized-index of the i-th value (in the values tensor).
   * The linearized-index can be converted into an index tuple (k_1,...,k_rank)
   * using the shape provided below.
   * The indices must appear in ascending order without duplication.
   * In the first format, the ordering is lexicographic-ordering:
   * e.g., index-value [1,4] must appear before [2,1]
   * </pre>
   *
   * <code>optional .onnx.TensorProto indices = 2;</code>
   * @return Whether the indices field is set.
   */
  boolean hasIndices();
  /**
   * <code>optional .onnx.TensorProto indices = 2;</code>
   * @return The indices.
   */
  onnx.Onnx.TensorProto getIndices();
  /**
   * <code>optional .onnx.TensorProto indices = 2;</code>
   */
  onnx.Onnx.TensorProtoOrBuilder getIndicesOrBuilder();
  /**
   * <pre>
   * The shape of the underlying dense-tensor: [dim_1, dim_2, ... dim_rank]
   * </pre>
   *
   * <code>repeated int64 dims = 3;</code>
   * @return A list containing the dims.
   */
  java.util.List<java.lang.Long> getDimsList();
  /**
   * <code>repeated int64 dims = 3;</code>
   * @return The count of dims.
   */
  int getDimsCount();
  /**
   * <code>repeated int64 dims = 3;</code>
   * @param index The index of the element to return.
   * @return The dims at the given index.
   */
  long getDims(int index);
}
/**
*
* A serialized sparse-tensor value
*
*
* Protobuf type {@code onnx.SparseTensorProto}
*/
public static final class SparseTensorProto extends
com.google.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:onnx.SparseTensorProto)
SparseTensorProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use SparseTensorProto.newBuilder() to construct.
private SparseTensorProto(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
  super(builder);
}
private SparseTensorProto() {
  // Repeated int64 field starts out as the shared empty list.
  dims_ = emptyLongList();
}
// Reflection hook used by the protobuf runtime to create fresh instances.
@java.lang.Override
@SuppressWarnings({"unused"})
protected java.lang.Object newInstance(
    UnusedPrivateParameter unused) {
  return new SparseTensorProto();
}
public static final com.google.protobuf.Descriptors.Descriptor
    getDescriptor() {
  return onnx.Onnx.internal_static_onnx_SparseTensorProto_descriptor;
}
// Maps descriptor fields to the generated accessors via reflection.
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
    internalGetFieldAccessorTable() {
  return onnx.Onnx.internal_static_onnx_SparseTensorProto_fieldAccessorTable
      .ensureFieldAccessorsInitialized(
          onnx.Onnx.SparseTensorProto.class, onnx.Onnx.SparseTensorProto.Builder.class);
}
private int bitField0_;
public static final int VALUES_FIELD_NUMBER = 1;
private onnx.Onnx.TensorProto values_;
/**
 * <pre>
 * The sequence of non-default values are encoded as a tensor of shape [NNZ].
 * The default-value is zero for numeric tensors, and empty-string for string tensors.
 * values must have a non-empty name present which serves as a name for SparseTensorProto
 * when used in sparse_initializer list.
 * </pre>
 *
 * <code>optional .onnx.TensorProto values = 1;</code>
 * @return Whether the values field is set.
 */
@java.lang.Override
public boolean hasValues() {
  return ((bitField0_ & 0x00000001) != 0);
}
/**
 * <code>optional .onnx.TensorProto values = 1;</code>
 * @return The values.
 */
// NOTE(review): this accessor was missing from the extracted source even though
// writeTo()/getSerializedSize()/equals() below call it; restored per the
// standard protoc codegen pattern (mirrors getIndices()).
@java.lang.Override
public onnx.Onnx.TensorProto getValues() {
  return values_ == null ? onnx.Onnx.TensorProto.getDefaultInstance() : values_;
}
/**
 * <code>optional .onnx.TensorProto values = 1;</code>
 */
@java.lang.Override
public onnx.Onnx.TensorProtoOrBuilder getValuesOrBuilder() {
  return values_ == null ? onnx.Onnx.TensorProto.getDefaultInstance() : values_;
}
public static final int INDICES_FIELD_NUMBER = 2;
private onnx.Onnx.TensorProto indices_;
/**
 * <pre>
 * The indices of the non-default values, which may be stored in one of two formats.
 * (a) Indices can be a tensor of shape [NNZ, rank] with the [i,j]-th value
 * corresponding to the j-th index of the i-th value (in the values tensor).
 * (b) Indices can be a tensor of shape [NNZ], in which case the i-th value
 * must be the linearized-index of the i-th value (in the values tensor).
 * The linearized-index can be converted into an index tuple (k_1,...,k_rank)
 * using the shape provided below.
 * The indices must appear in ascending order without duplication.
 * In the first format, the ordering is lexicographic-ordering:
 * e.g., index-value [1,4] must appear before [2,1]
 * </pre>
 *
 * <code>optional .onnx.TensorProto indices = 2;</code>
 * @return Whether the indices field is set.
 */
@java.lang.Override
public boolean hasIndices() {
  return ((bitField0_ & 0x00000002) != 0);
}
/**
 * <code>optional .onnx.TensorProto indices = 2;</code>
 * @return The indices, or the default instance when unset.
 */
@java.lang.Override
public onnx.Onnx.TensorProto getIndices() {
  return indices_ == null ? onnx.Onnx.TensorProto.getDefaultInstance() : indices_;
}
/**
 * <code>optional .onnx.TensorProto indices = 2;</code>
 */
@java.lang.Override
public onnx.Onnx.TensorProtoOrBuilder getIndicesOrBuilder() {
  return indices_ == null ? onnx.Onnx.TensorProto.getDefaultInstance() : indices_;
}
public static final int DIMS_FIELD_NUMBER = 3;
@SuppressWarnings("serial")
private com.google.protobuf.Internal.LongList dims_ =
    emptyLongList();
/**
 * <pre>
 * The shape of the underlying dense-tensor: [dim_1, dim_2, ... dim_rank]
 * </pre>
 *
 * <code>repeated int64 dims = 3;</code>
 * @return A list containing the dims.
 */
@java.lang.Override
public java.util.List<java.lang.Long>
    getDimsList() {
  return dims_;
}
/**
 * <code>repeated int64 dims = 3;</code>
 * @return The count of dims.
 */
public int getDimsCount() {
  return dims_.size();
}
/**
 * <code>repeated int64 dims = 3;</code>
 * @param index The index of the element to return.
 * @return The dims at the given index.
 */
public long getDims(int index) {
  return dims_.getLong(index);
}
// Memoized initialization check: -1 = unknown, 0 = false, 1 = true.
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
  byte isInitialized = memoizedIsInitialized;
  if (isInitialized == 1) return true;
  if (isInitialized == 0) return false;
  // No required fields, so every instance is initialized.
  memoizedIsInitialized = 1;
  return true;
}
// Serializes set fields in field-number order, then any unknown fields.
@java.lang.Override
public void writeTo(com.google.protobuf.CodedOutputStream output)
    throws java.io.IOException {
  if (((bitField0_ & 0x00000001) != 0)) {
    output.writeMessage(1, getValues());
  }
  if (((bitField0_ & 0x00000002) != 0)) {
    output.writeMessage(2, getIndices());
  }
  for (int i = 0; i < dims_.size(); i++) {
    output.writeInt64(3, dims_.getLong(i));
  }
  getUnknownFields().writeTo(output);
}
// Computes (and memoizes) the serialized byte size; must mirror writeTo().
@java.lang.Override
public int getSerializedSize() {
  int size = memoizedSize;
  if (size != -1) return size;
  size = 0;
  if (((bitField0_ & 0x00000001) != 0)) {
    size += com.google.protobuf.CodedOutputStream
        .computeMessageSize(1, getValues());
  }
  if (((bitField0_ & 0x00000002) != 0)) {
    size += com.google.protobuf.CodedOutputStream
        .computeMessageSize(2, getIndices());
  }
  {
    int dataSize = 0;
    for (int i = 0; i < dims_.size(); i++) {
      dataSize += com.google.protobuf.CodedOutputStream
          .computeInt64SizeNoTag(dims_.getLong(i));
    }
    size += dataSize;
    // One byte of tag per (non-packed) dims entry.
    size += 1 * getDimsList().size();
  }
  size += getUnknownFields().getSerializedSize();
  memoizedSize = size;
  return size;
}
// Structural equality: has-bits, field values, and unknown fields must match.
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
  if (obj == this) {
    return true;
  }
  if (!(obj instanceof onnx.Onnx.SparseTensorProto)) {
    return super.equals(obj);
  }
  onnx.Onnx.SparseTensorProto other = (onnx.Onnx.SparseTensorProto) obj;
  if (hasValues() != other.hasValues()) return false;
  if (hasValues()) {
    if (!getValues()
        .equals(other.getValues())) return false;
  }
  if (hasIndices() != other.hasIndices()) return false;
  if (hasIndices()) {
    if (!getIndices()
        .equals(other.getIndices())) return false;
  }
  if (!getDimsList()
      .equals(other.getDimsList())) return false;
  if (!getUnknownFields().equals(other.getUnknownFields())) return false;
  return true;
}
// Hash mixes descriptor, each set field tagged by its field number, and
// unknown fields; memoized since messages are immutable.
@java.lang.Override
public int hashCode() {
  if (memoizedHashCode != 0) {
    return memoizedHashCode;
  }
  int hash = 41;
  hash = (19 * hash) + getDescriptor().hashCode();
  if (hasValues()) {
    hash = (37 * hash) + VALUES_FIELD_NUMBER;
    hash = (53 * hash) + getValues().hashCode();
  }
  if (hasIndices()) {
    hash = (37 * hash) + INDICES_FIELD_NUMBER;
    hash = (53 * hash) + getIndices().hashCode();
  }
  if (getDimsCount() > 0) {
    hash = (37 * hash) + DIMS_FIELD_NUMBER;
    hash = (53 * hash) + getDimsList().hashCode();
  }
  hash = (29 * hash) + getUnknownFields().hashCode();
  memoizedHashCode = hash;
  return hash;
}
// Standard generated parse entry points: each overload delegates to PARSER
// (optionally with an extension registry); stream variants route through
// GeneratedMessageV3 helpers that convert IOExceptions appropriately.
public static onnx.Onnx.SparseTensorProto parseFrom(
    java.nio.ByteBuffer data)
    throws com.google.protobuf.InvalidProtocolBufferException {
  return PARSER.parseFrom(data);
}
public static onnx.Onnx.SparseTensorProto parseFrom(
    java.nio.ByteBuffer data,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws com.google.protobuf.InvalidProtocolBufferException {
  return PARSER.parseFrom(data, extensionRegistry);
}
public static onnx.Onnx.SparseTensorProto parseFrom(
    com.google.protobuf.ByteString data)
    throws com.google.protobuf.InvalidProtocolBufferException {
  return PARSER.parseFrom(data);
}
public static onnx.Onnx.SparseTensorProto parseFrom(
    com.google.protobuf.ByteString data,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws com.google.protobuf.InvalidProtocolBufferException {
  return PARSER.parseFrom(data, extensionRegistry);
}
public static onnx.Onnx.SparseTensorProto parseFrom(byte[] data)
    throws com.google.protobuf.InvalidProtocolBufferException {
  return PARSER.parseFrom(data);
}
public static onnx.Onnx.SparseTensorProto parseFrom(
    byte[] data,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws com.google.protobuf.InvalidProtocolBufferException {
  return PARSER.parseFrom(data, extensionRegistry);
}
public static onnx.Onnx.SparseTensorProto parseFrom(java.io.InputStream input)
    throws java.io.IOException {
  return com.google.protobuf.GeneratedMessageV3
      .parseWithIOException(PARSER, input);
}
public static onnx.Onnx.SparseTensorProto parseFrom(
    java.io.InputStream input,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws java.io.IOException {
  return com.google.protobuf.GeneratedMessageV3
      .parseWithIOException(PARSER, input, extensionRegistry);
}
// Delimited variants read a varint length prefix before the message bytes.
public static onnx.Onnx.SparseTensorProto parseDelimitedFrom(java.io.InputStream input)
    throws java.io.IOException {
  return com.google.protobuf.GeneratedMessageV3
      .parseDelimitedWithIOException(PARSER, input);
}
public static onnx.Onnx.SparseTensorProto parseDelimitedFrom(
    java.io.InputStream input,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws java.io.IOException {
  return com.google.protobuf.GeneratedMessageV3
      .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static onnx.Onnx.SparseTensorProto parseFrom(
    com.google.protobuf.CodedInputStream input)
    throws java.io.IOException {
  return com.google.protobuf.GeneratedMessageV3
      .parseWithIOException(PARSER, input);
}
public static onnx.Onnx.SparseTensorProto parseFrom(
    com.google.protobuf.CodedInputStream input,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws java.io.IOException {
  return com.google.protobuf.GeneratedMessageV3
      .parseWithIOException(PARSER, input, extensionRegistry);
}
// Builder factories: builders are derived from the default instance so that
// newBuilder() is cheap and toBuilder() only copies when fields are set.
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
  return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(onnx.Onnx.SparseTensorProto prototype) {
  return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
  return this == DEFAULT_INSTANCE
      ? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
    com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
  Builder builder = new Builder(parent);
  return builder;
}
/**
*
* A serialized sparse-tensor value
*
*
* Protobuf type {@code onnx.SparseTensorProto}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessageV3.Builder implements
// @@protoc_insertion_point(builder_implements:onnx.SparseTensorProto)
onnx.Onnx.SparseTensorProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
    getDescriptor() {
  return onnx.Onnx.internal_static_onnx_SparseTensorProto_descriptor;
}
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
    internalGetFieldAccessorTable() {
  return onnx.Onnx.internal_static_onnx_SparseTensorProto_fieldAccessorTable
      .ensureFieldAccessorsInitialized(
          onnx.Onnx.SparseTensorProto.class, onnx.Onnx.SparseTensorProto.Builder.class);
}
// Construct using onnx.Onnx.SparseTensorProto.newBuilder()
private Builder() {
  maybeForceBuilderInitialization();
}
private Builder(
    com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
  super(parent);
  maybeForceBuilderInitialization();
}
// Eagerly creates nested-message field builders when the runtime requires it
// (e.g. when builder-parent change notifications are in use).
private void maybeForceBuilderInitialization() {
  if (com.google.protobuf.GeneratedMessageV3
      .alwaysUseFieldBuilders) {
    getValuesFieldBuilder();
    getIndicesFieldBuilder();
  }
}
// Resets all fields and disposes any live nested-field builders.
@java.lang.Override
public Builder clear() {
  super.clear();
  bitField0_ = 0;
  values_ = null;
  if (valuesBuilder_ != null) {
    valuesBuilder_.dispose();
    valuesBuilder_ = null;
  }
  indices_ = null;
  if (indicesBuilder_ != null) {
    indicesBuilder_.dispose();
    indicesBuilder_ = null;
  }
  dims_ = emptyLongList();
  return this;
}
@java.lang.Override
public com.google.protobuf.Descriptors.Descriptor
    getDescriptorForType() {
  return onnx.Onnx.internal_static_onnx_SparseTensorProto_descriptor;
}
@java.lang.Override
public onnx.Onnx.SparseTensorProto getDefaultInstanceForType() {
  return onnx.Onnx.SparseTensorProto.getDefaultInstance();
}
// build() enforces initialization; buildPartial() does not.
@java.lang.Override
public onnx.Onnx.SparseTensorProto build() {
  onnx.Onnx.SparseTensorProto result = buildPartial();
  if (!result.isInitialized()) {
    throw newUninitializedMessageException(result);
  }
  return result;
}
@java.lang.Override
public onnx.Onnx.SparseTensorProto buildPartial() {
  onnx.Onnx.SparseTensorProto result = new onnx.Onnx.SparseTensorProto(this);
  if (bitField0_ != 0) { buildPartial0(result); }
  onBuilt();
  return result;
}
// Copies set fields from builder state into the result message, translating
// builder has-bits (0x1 values, 0x2 indices, 0x4 dims) into message has-bits.
private void buildPartial0(onnx.Onnx.SparseTensorProto result) {
  int from_bitField0_ = bitField0_;
  int to_bitField0_ = 0;
  if (((from_bitField0_ & 0x00000001) != 0)) {
    result.values_ = valuesBuilder_ == null
        ? values_
        : valuesBuilder_.build();
    to_bitField0_ |= 0x00000001;
  }
  if (((from_bitField0_ & 0x00000002) != 0)) {
    result.indices_ = indicesBuilder_ == null
        ? indices_
        : indicesBuilder_.build();
    to_bitField0_ |= 0x00000002;
  }
  if (((from_bitField0_ & 0x00000004) != 0)) {
    // Freeze the dims list before sharing it with the immutable message.
    dims_.makeImmutable();
    result.dims_ = dims_;
  }
  result.bitField0_ |= to_bitField0_;
}
// Reflection-based mutators: straight delegation to the generated superclass.
@java.lang.Override
public Builder clone() {
  return super.clone();
}
@java.lang.Override
public Builder setField(
    com.google.protobuf.Descriptors.FieldDescriptor field,
    java.lang.Object value) {
  return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
    com.google.protobuf.Descriptors.FieldDescriptor field) {
  return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
    com.google.protobuf.Descriptors.OneofDescriptor oneof) {
  return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
    com.google.protobuf.Descriptors.FieldDescriptor field,
    int index, java.lang.Object value) {
  return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
    com.google.protobuf.Descriptors.FieldDescriptor field,
    java.lang.Object value) {
  return super.addRepeatedField(field, value);
}
// Typed merge when the other message is a SparseTensorProto; reflective
// merge otherwise.
@java.lang.Override
public Builder mergeFrom(com.google.protobuf.Message other) {
  if (other instanceof onnx.Onnx.SparseTensorProto) {
    return mergeFrom((onnx.Onnx.SparseTensorProto)other);
  } else {
    super.mergeFrom(other);
    return this;
  }
}
// Field-wise merge: nested messages are merged, repeated dims are appended
// (or shared directly when this builder's list is still empty).
public Builder mergeFrom(onnx.Onnx.SparseTensorProto other) {
  if (other == onnx.Onnx.SparseTensorProto.getDefaultInstance()) return this;
  if (other.hasValues()) {
    mergeValues(other.getValues());
  }
  if (other.hasIndices()) {
    mergeIndices(other.getIndices());
  }
  if (!other.dims_.isEmpty()) {
    if (dims_.isEmpty()) {
      dims_ = other.dims_;
      dims_.makeImmutable();
      bitField0_ |= 0x00000004;
    } else {
      ensureDimsIsMutable();
      dims_.addAll(other.dims_);
    }
    onChanged();
  }
  this.mergeUnknownFields(other.getUnknownFields());
  onChanged();
  return this;
}
/** SparseTensorProto has no required fields, so every builder state is valid. */
@java.lang.Override
public final boolean isInitialized() {
  return true;
}
// Streaming wire-format merge. Dispatches on the wire tag:
//   10 = field 1 (values, length-delimited message)
//   18 = field 2 (indices, length-delimited message)
//   24 = field 3 (dims, single varint)
//   26 = field 3 (dims, packed length-delimited run of varints)
// Unrecognized tags are preserved as unknown fields; tag 0 / end-group ends
// the message.
@java.lang.Override
public Builder mergeFrom(
    com.google.protobuf.CodedInputStream input,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws java.io.IOException {
  if (extensionRegistry == null) {
    throw new java.lang.NullPointerException();
  }
  try {
    boolean done = false;
    while (!done) {
      int tag = input.readTag();
      switch (tag) {
        case 0:
          done = true;
          break;
        case 10: {
          input.readMessage(
              getValuesFieldBuilder().getBuilder(),
              extensionRegistry);
          bitField0_ |= 0x00000001;
          break;
        } // case 10
        case 18: {
          input.readMessage(
              getIndicesFieldBuilder().getBuilder(),
              extensionRegistry);
          bitField0_ |= 0x00000002;
          break;
        } // case 18
        case 24: {
          long v = input.readInt64();
          ensureDimsIsMutable();
          dims_.addLong(v);
          break;
        } // case 24
        case 26: {
          // Packed encoding: read varints until the pushed length limit.
          int length = input.readRawVarint32();
          int limit = input.pushLimit(length);
          ensureDimsIsMutable();
          while (input.getBytesUntilLimit() > 0) {
            dims_.addLong(input.readInt64());
          }
          input.popLimit(limit);
          break;
        } // case 26
        default: {
          if (!super.parseUnknownField(input, extensionRegistry, tag)) {
            done = true; // was an endgroup tag
          }
          break;
        } // default:
      } // switch (tag)
    } // while (!done)
  } catch (com.google.protobuf.InvalidProtocolBufferException e) {
    throw e.unwrapIOException();
  } finally {
    // Notify parents even on failure: fields read so far were mutated.
    onChanged();
  } // finally
  return this;
}
// Builder state: has-bits, the values message, and its lazily created
// single-field builder (non-null once getValuesFieldBuilder() is called).
private int bitField0_;
private onnx.Onnx.TensorProto values_;
private com.google.protobuf.SingleFieldBuilderV3<
    onnx.Onnx.TensorProto, onnx.Onnx.TensorProto.Builder, onnx.Onnx.TensorProtoOrBuilder> valuesBuilder_;
/**
 * <pre>
 * The sequence of non-default values are encoded as a tensor of shape [NNZ].
 * The default-value is zero for numeric tensors, and empty-string for string tensors.
 * values must have a non-empty name present which serves as a name for SparseTensorProto
 * when used in sparse_initializer list.
 * </pre>
 *
 * <code>optional .onnx.TensorProto values = 1;</code>
 * @return Whether the values field is set.
 */
public boolean hasValues() {
  return ((bitField0_ & 0x00000001) != 0);
}
/**
*
* The sequence of non-default values are encoded as a tensor of shape [NNZ].
* The default-value is zero for numeric tensors, and empty-string for string tensors.
* values must have a non-empty name present which serves as a name for SparseTensorProto
* when used in sparse_initializer list.
*
* The sequence of non-default values are encoded as a tensor of shape [NNZ].
* The default-value is zero for numeric tensors, and empty-string for string tensors.
* values must have a non-empty name present which serves as a name for SparseTensorProto
* when used in sparse_initializer list.
*
* The sequence of non-default values are encoded as a tensor of shape [NNZ].
* The default-value is zero for numeric tensors, and empty-string for string tensors.
* values must have a non-empty name present which serves as a name for SparseTensorProto
* when used in sparse_initializer list.
*
* The sequence of non-default values are encoded as a tensor of shape [NNZ].
* The default-value is zero for numeric tensors, and empty-string for string tensors.
* values must have a non-empty name present which serves as a name for SparseTensorProto
* when used in sparse_initializer list.
*
* The sequence of non-default values are encoded as a tensor of shape [NNZ].
* The default-value is zero for numeric tensors, and empty-string for string tensors.
* values must have a non-empty name present which serves as a name for SparseTensorProto
* when used in sparse_initializer list.
*
* The sequence of non-default values are encoded as a tensor of shape [NNZ].
* The default-value is zero for numeric tensors, and empty-string for string tensors.
* values must have a non-empty name present which serves as a name for SparseTensorProto
* when used in sparse_initializer list.
*
* The sequence of non-default values are encoded as a tensor of shape [NNZ].
* The default-value is zero for numeric tensors, and empty-string for string tensors.
* values must have a non-empty name present which serves as a name for SparseTensorProto
* when used in sparse_initializer list.
*
* The sequence of non-default values are encoded as a tensor of shape [NNZ].
* The default-value is zero for numeric tensors, and empty-string for string tensors.
* values must have a non-empty name present which serves as a name for SparseTensorProto
* when used in sparse_initializer list.
*
* The indices of the non-default values, which may be stored in one of two formats.
* (a) Indices can be a tensor of shape [NNZ, rank] with the [i,j]-th value
* corresponding to the j-th index of the i-th value (in the values tensor).
* (b) Indices can be a tensor of shape [NNZ], in which case the i-th value
* must be the linearized-index of the i-th value (in the values tensor).
* The linearized-index can be converted into an index tuple (k_1,...,k_rank)
* using the shape provided below.
* The indices must appear in ascending order without duplication.
* In the first format, the ordering is lexicographic-ordering:
* e.g., index-value [1,4] must appear before [2,1]
*
*
* optional .onnx.TensorProto indices = 2;
* @return Whether the indices field is set.
*/
public boolean hasIndices() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
*
* The indices of the non-default values, which may be stored in one of two formats.
* (a) Indices can be a tensor of shape [NNZ, rank] with the [i,j]-th value
* corresponding to the j-th index of the i-th value (in the values tensor).
* (b) Indices can be a tensor of shape [NNZ], in which case the i-th value
* must be the linearized-index of the i-th value (in the values tensor).
* The linearized-index can be converted into an index tuple (k_1,...,k_rank)
* using the shape provided below.
* The indices must appear in ascending order without duplication.
* In the first format, the ordering is lexicographic-ordering:
* e.g., index-value [1,4] must appear before [2,1]
*
*
* optional .onnx.TensorProto indices = 2;
* @return The indices.
*/
public onnx.Onnx.TensorProto getIndices() {
if (indicesBuilder_ == null) {
return indices_ == null ? onnx.Onnx.TensorProto.getDefaultInstance() : indices_;
} else {
return indicesBuilder_.getMessage();
}
}
/**
 * <pre>
 * Sets the indices tensor of the non-default values: either a [NNZ, rank]
 * tensor of index tuples or a [NNZ] tensor of linearized indices, in
 * ascending order without duplication.
 * </pre>
 *
 * <code>optional .onnx.TensorProto indices = 2;</code>
 */
public Builder setIndices(onnx.Onnx.TensorProto value) {
  if (indicesBuilder_ != null) {
    // A nested builder owns the field; route the assignment through it.
    indicesBuilder_.setMessage(value);
  } else {
    if (value == null) {
      throw new NullPointerException();
    }
    indices_ = value;
  }
  bitField0_ |= 0x00000002;
  onChanged();
  return this;
}
/**
 * <pre>
 * The shape of the underlying dense-tensor: [dim_1, dim_2, ... dim_rank]
 * </pre>
 *
 * <code>repeated int64 dims = 3;</code>
 * @return A list containing the dims.
 */
public java.util.List<java.lang.Long>
    getDimsList() {
  // Freeze the backing list so the returned view cannot be mutated behind
  // the builder's back; restored element type <java.lang.Long> (the raw
  // List here was an extraction artifact that erased the generics).
  dims_.makeImmutable();
  return dims_;
}
/**
 * <pre>
 * The shape of the underlying dense-tensor: [dim_1, dim_2, ... dim_rank]
 * </pre>
 *
 * <code>repeated int64 dims = 3;</code>
 * @return The count of dims.
 */
public int getDimsCount() {
  return dims_.size();
}
/**
 * <pre>
 * The shape of the underlying dense-tensor: [dim_1, dim_2, ... dim_rank]
 * </pre>
 *
 * <code>repeated int64 dims = 3;</code>
 * @param index The index of the element to return.
 * @return The dims at the given index.
 */
public long getDims(int index) {
  return dims_.getLong(index);
}
/**
 * <pre>
 * The shape of the underlying dense-tensor: [dim_1, dim_2, ... dim_rank]
 * </pre>
 *
 * <code>repeated int64 dims = 3;</code>
 * @param index The index to set the value at.
 * @param value The dims to set.
 * @return This builder for chaining.
 */
public Builder setDims(
    int index, long value) {
  ensureDimsIsMutable();
  dims_.setLong(index, value);
  bitField0_ |= 0x00000004;
  onChanged();
  return this;
}
/**
 * <pre>
 * The shape of the underlying dense-tensor: [dim_1, dim_2, ... dim_rank]
 * </pre>
 *
 * <code>repeated int64 dims = 3;</code>
 * @param value The dims to add.
 * @return This builder for chaining.
 */
public Builder addDims(long value) {
  ensureDimsIsMutable();
  dims_.addLong(value);
  bitField0_ |= 0x00000004;
  onChanged();
  return this;
}
/**
*
* The shape of the underlying dense-tensor: [dim_1, dim_2, ... dim_rank]
*
*
* repeated int64 dims = 3;
* @param values The dims to add.
* @return This builder for chaining.
*/
public Builder addAllDims(
java.lang.Iterable extends java.lang.Long> values) {
ensureDimsIsMutable();
com.google.protobuf.AbstractMessageLite.Builder.addAll(
values, dims_);
bitField0_ |= 0x00000004;
onChanged();
return this;
}
/**
 * <pre>
 * The shape of the underlying dense-tensor: [dim_1, dim_2, ... dim_rank]
 * </pre>
 *
 * <code>repeated int64 dims = 3;</code>
 * @return This builder for chaining.
 */
public Builder clearDims() {
  dims_ = emptyLongList();
  bitField0_ = (bitField0_ & ~0x00000004);
  onChanged();
  return this;
}
// Boilerplate delegation to the generated base class; narrows the return
// type to this Builder for fluent chaining.
@java.lang.Override
public final Builder setUnknownFields(
    final com.google.protobuf.UnknownFieldSet unknownFields) {
  return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
    final com.google.protobuf.UnknownFieldSet unknownFields) {
  return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:onnx.SparseTensorProto)
}
// @@protoc_insertion_point(class_scope:onnx.SparseTensorProto)
// Shared immutable default (all-fields-unset) instance, created at class load.
private static final onnx.Onnx.SparseTensorProto DEFAULT_INSTANCE;
static {
  DEFAULT_INSTANCE = new onnx.Onnx.SparseTensorProto();
}
/** Returns the shared immutable default instance of SparseTensorProto. */
public static onnx.Onnx.SparseTensorProto getDefaultInstance() {
  return DEFAULT_INSTANCE;
}
/**
 * Deprecated public parser singleton; prefer {@link #parser()}. Restored the
 * {@code <SparseTensorProto>} type arguments on Parser/AbstractParser that
 * were stripped by extraction (raw types here fail to implement
 * parsePartialFrom's covariant return).
 */
@java.lang.Deprecated public static final com.google.protobuf.Parser<SparseTensorProto>
    PARSER = new com.google.protobuf.AbstractParser<SparseTensorProto>() {
  @java.lang.Override
  public SparseTensorProto parsePartialFrom(
      com.google.protobuf.CodedInputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    Builder builder = newBuilder();
    try {
      builder.mergeFrom(input, extensionRegistry);
    } catch (com.google.protobuf.InvalidProtocolBufferException e) {
      // Attach whatever was parsed so far so callers can inspect it.
      throw e.setUnfinishedMessage(builder.buildPartial());
    } catch (com.google.protobuf.UninitializedMessageException e) {
      throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
    } catch (java.io.IOException e) {
      throw new com.google.protobuf.InvalidProtocolBufferException(e)
          .setUnfinishedMessage(builder.buildPartial());
    }
    return builder.buildPartial();
  }
};
/**
 * Returns the type-safe parser for SparseTensorProto messages. Restored the
 * stripped {@code <SparseTensorProto>} type argument on both return types.
 */
public static com.google.protobuf.Parser<SparseTensorProto> parser() {
  return PARSER;
}
@java.lang.Override
public com.google.protobuf.Parser<SparseTensorProto> getParserForType() {
  return PARSER;
}
@java.lang.Override
public onnx.Onnx.SparseTensorProto getDefaultInstanceForType() {
  return DEFAULT_INSTANCE;
}
}
public interface TensorShapeProtoOrBuilder extends
// @@protoc_insertion_point(interface_extends:onnx.TensorShapeProto)
com.google.protobuf.MessageOrBuilder {
/**
* repeated .onnx.TensorShapeProto.Dimension dim = 1;
*/
java.util.List
getDimList();
/**
* repeated .onnx.TensorShapeProto.Dimension dim = 1;
*/
onnx.Onnx.TensorShapeProto.Dimension getDim(int index);
/**
* repeated .onnx.TensorShapeProto.Dimension dim = 1;
*/
int getDimCount();
/**
* repeated .onnx.TensorShapeProto.Dimension dim = 1;
*/
java.util.List extends onnx.Onnx.TensorShapeProto.DimensionOrBuilder>
getDimOrBuilderList();
/**
* repeated .onnx.TensorShapeProto.Dimension dim = 1;
*/
onnx.Onnx.TensorShapeProto.DimensionOrBuilder getDimOrBuilder(
int index);
}
/**
*
* Defines a tensor shape. A dimension can be either an integer value
* or a symbolic variable. A symbolic variable represents an unknown
* dimension.
*
*
* Protobuf type {@code onnx.TensorShapeProto}
*/
public static final class TensorShapeProto extends
com.google.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:onnx.TensorShapeProto)
TensorShapeProtoOrBuilder {
private static final long serialVersionUID = 0L;
// Use TensorShapeProto.newBuilder() to construct.
// Restored the stripped `<?>` wildcard on the builder parameter.
private TensorShapeProto(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
  super(builder);
}
// No-arg constructor for the default instance: empty immutable dim list.
private TensorShapeProto() {
  dim_ = java.util.Collections.emptyList();
}
@java.lang.Override
@SuppressWarnings({"unused"})
protected java.lang.Object newInstance(
    UnusedPrivateParameter unused) {
  return new TensorShapeProto();
}
// Reflection support: descriptor and field-accessor table generated from
// onnx.proto for the TensorShapeProto message.
public static final com.google.protobuf.Descriptors.Descriptor
    getDescriptor() {
  return onnx.Onnx.internal_static_onnx_TensorShapeProto_descriptor;
}
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
    internalGetFieldAccessorTable() {
  return onnx.Onnx.internal_static_onnx_TensorShapeProto_fieldAccessorTable
      .ensureFieldAccessorsInitialized(
          onnx.Onnx.TensorShapeProto.class, onnx.Onnx.TensorShapeProto.Builder.class);
}
/**
 * Accessor contract shared by {@code Dimension} and its Builder. Restored the
 * {@code getDimParam()} and {@code getDenotation()} declarations that the
 * extraction dropped (the Dimension class implements and uses both — see its
 * equals()/hashCode()), and de-duplicated the garbled javadoc.
 */
public interface DimensionOrBuilder extends
    // @@protoc_insertion_point(interface_extends:onnx.TensorShapeProto.Dimension)
    com.google.protobuf.MessageOrBuilder {
  /**
   * <code>int64 dim_value = 1;</code>
   * @return Whether the dimValue field is set.
   */
  boolean hasDimValue();
  /**
   * <code>int64 dim_value = 1;</code>
   * @return The dimValue.
   */
  long getDimValue();
  /**
   * <pre>
   * namespace Shape
   * </pre>
   *
   * <code>string dim_param = 2;</code>
   * @return Whether the dimParam field is set.
   */
  boolean hasDimParam();
  /**
   * <pre>
   * namespace Shape
   * </pre>
   *
   * <code>string dim_param = 2;</code>
   * @return The dimParam.
   */
  java.lang.String getDimParam();
  /**
   * <code>string dim_param = 2;</code>
   * @return The bytes for dimParam.
   */
  com.google.protobuf.ByteString
      getDimParamBytes();
  /**
   * <pre>
   * Standard denotation can optionally be used to denote tensor
   * dimensions with standard semantic descriptions to ensure
   * that operations are applied to the correct axis of a tensor.
   * Refer to https://github.com/onnx/onnx/blob/main/docs/DimensionDenotation.md#denotation-definition
   * for pre-defined dimension denotations.
   * </pre>
   *
   * <code>optional string denotation = 3;</code>
   * @return Whether the denotation field is set.
   */
  boolean hasDenotation();
  /**
   * <code>optional string denotation = 3;</code>
   * @return The denotation.
   */
  java.lang.String getDenotation();
  /**
   * <code>optional string denotation = 3;</code>
   * @return The bytes for denotation.
   */
  com.google.protobuf.ByteString
      getDenotationBytes();
  onnx.Onnx.TensorShapeProto.Dimension.ValueCase getValueCase();
}
/**
* Protobuf type {@code onnx.TensorShapeProto.Dimension}
*/
public static final class Dimension extends
com.google.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:onnx.TensorShapeProto.Dimension)
DimensionOrBuilder {
private static final long serialVersionUID = 0L;
// Use Dimension.newBuilder() to construct.
// Restored the stripped `<?>` wildcard on the builder parameter.
private Dimension(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
  super(builder);
}
// No-arg constructor for the default instance.
private Dimension() {
  denotation_ = "";
}
@java.lang.Override
@SuppressWarnings({"unused"})
protected java.lang.Object newInstance(
    UnusedPrivateParameter unused) {
  return new Dimension();
}
// Reflection support for the nested Dimension message.
public static final com.google.protobuf.Descriptors.Descriptor
    getDescriptor() {
  return onnx.Onnx.internal_static_onnx_TensorShapeProto_Dimension_descriptor;
}
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
    internalGetFieldAccessorTable() {
  return onnx.Onnx.internal_static_onnx_TensorShapeProto_Dimension_fieldAccessorTable
      .ensureFieldAccessorsInitialized(
          onnx.Onnx.TensorShapeProto.Dimension.class, onnx.Onnx.TensorShapeProto.Dimension.Builder.class);
}
// Bit 0 tracks whether the optional `denotation` field is set.
private int bitField0_;
// Field number of the active `value` oneof member (0 = none set).
private int valueCase_ = 0;
@SuppressWarnings("serial")
// Payload of the active oneof member: Long for dim_value, String/ByteString
// for dim_param.
private java.lang.Object value_;
// Enumerates which member of the `value` oneof is set.
public enum ValueCase
    implements com.google.protobuf.Internal.EnumLite,
        com.google.protobuf.AbstractMessage.InternalOneOfEnum {
  DIM_VALUE(1),
  DIM_PARAM(2),
  VALUE_NOT_SET(0);
  private final int value;
  private ValueCase(int value) {
    this.value = value;
  }
  /**
   * @param value The number of the enum to look for.
   * @return The enum associated with the given number.
   * @deprecated Use {@link #forNumber(int)} instead.
   */
  @java.lang.Deprecated
  public static ValueCase valueOf(int value) {
    return forNumber(value);
  }
  // Maps a field number to its case constant; null for unknown numbers.
  public static ValueCase forNumber(int value) {
    switch (value) {
      case 1: return DIM_VALUE;
      case 2: return DIM_PARAM;
      case 0: return VALUE_NOT_SET;
      default: return null;
    }
  }
  public int getNumber() {
    return this.value;
  }
};
public ValueCase
    getValueCase() {
  return ValueCase.forNumber(
      valueCase_);
}
public static final int DIM_VALUE_FIELD_NUMBER = 1;
/**
 * <code>int64 dim_value = 1;</code>
 * @return Whether the dimValue field is set.
 */
@java.lang.Override
public boolean hasDimValue() {
  return valueCase_ == 1;
}
/**
 * <code>int64 dim_value = 1;</code>
 * @return The dimValue, or 0 when dim_value is not the active oneof member.
 */
@java.lang.Override
public long getDimValue() {
  if (valueCase_ == 1) {
    return (java.lang.Long) value_;
  }
  return 0L;
}
public static final int DIM_PARAM_FIELD_NUMBER = 2;
/**
 * <pre>
 * namespace Shape
 * </pre>
 *
 * <code>string dim_param = 2;</code>
 * @return Whether the dimParam field is set.
 */
public boolean hasDimParam() {
  return valueCase_ == 2;
}
/**
 * <pre>
 * namespace Shape
 * </pre>
 *
 * <code>string dim_param = 2;</code>
 * @return The dimParam, or "" when dim_param is not the active oneof member.
 */
public java.lang.String getDimParam() {
  // Restored: this accessor was dropped by extraction but is required by
  // equals()/hashCode() below and by the DimensionOrBuilder contract.
  java.lang.Object ref = "";
  if (valueCase_ == 2) {
    ref = value_;
  }
  if (ref instanceof java.lang.String) {
    return (java.lang.String) ref;
  } else {
    com.google.protobuf.ByteString bs =
        (com.google.protobuf.ByteString) ref;
    java.lang.String s = bs.toStringUtf8();
    if (valueCase_ == 2) {
      // Cache the decoded String back into the oneof slot.
      value_ = s;
    }
    return s;
  }
}
/**
 * <code>string dim_param = 2;</code>
 * @return The bytes for dimParam.
 */
public com.google.protobuf.ByteString
    getDimParamBytes() {
  java.lang.Object ref = "";
  if (valueCase_ == 2) {
    ref = value_;
  }
  if (ref instanceof java.lang.String) {
    com.google.protobuf.ByteString b =
        com.google.protobuf.ByteString.copyFromUtf8(
            (java.lang.String) ref);
    if (valueCase_ == 2) {
      // Cache the encoded ByteString back into the oneof slot.
      value_ = b;
    }
    return b;
  } else {
    return (com.google.protobuf.ByteString) ref;
  }
}
public static final int DENOTATION_FIELD_NUMBER = 3;
// Lazily decoded: starts as the parsed ByteString, replaced by the String
// form on first String access. volatile for safe cross-thread publication.
@SuppressWarnings("serial")
private volatile java.lang.Object denotation_ = "";
/**
 * <pre>
 * Standard denotation can optionally be used to denote tensor
 * dimensions with standard semantic descriptions to ensure
 * that operations are applied to the correct axis of a tensor.
 * Refer to https://github.com/onnx/onnx/blob/main/docs/DimensionDenotation.md#denotation-definition
 * for pre-defined dimension denotations.
 * </pre>
 *
 * <code>optional string denotation = 3;</code>
 * @return Whether the denotation field is set.
 */
@java.lang.Override
public boolean hasDenotation() {
  return ((bitField0_ & 0x00000001) != 0);
}
/**
 * <code>optional string denotation = 3;</code>
 * @return The denotation.
 */
@java.lang.Override
public java.lang.String getDenotation() {
  // Restored: this accessor was dropped by extraction but is required by
  // equals()/hashCode() below and by the DimensionOrBuilder contract.
  java.lang.Object ref = denotation_;
  if (ref instanceof java.lang.String) {
    return (java.lang.String) ref;
  } else {
    com.google.protobuf.ByteString bs =
        (com.google.protobuf.ByteString) ref;
    java.lang.String s = bs.toStringUtf8();
    denotation_ = s;
    return s;
  }
}
/**
 * <code>optional string denotation = 3;</code>
 * @return The bytes for denotation.
 */
@java.lang.Override
public com.google.protobuf.ByteString
    getDenotationBytes() {
  java.lang.Object ref = denotation_;
  if (ref instanceof java.lang.String) {
    com.google.protobuf.ByteString b =
        com.google.protobuf.ByteString.copyFromUtf8(
            (java.lang.String) ref);
    denotation_ = b;
    return b;
  } else {
    return (com.google.protobuf.ByteString) ref;
  }
}
// Memoized isInitialized() result: -1 = unknown, 0 = false, 1 = true.
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
  byte isInitialized = memoizedIsInitialized;
  if (isInitialized == 1) return true;
  if (isInitialized == 0) return false;
  // proto3 message: no required fields, so always initialized.
  memoizedIsInitialized = 1;
  return true;
}
// Serializes set fields in field-number order, then any unknown fields.
@java.lang.Override
public void writeTo(com.google.protobuf.CodedOutputStream output)
    throws java.io.IOException {
  if (valueCase_ == 1) {
    output.writeInt64(
        1, (long)((java.lang.Long) value_));
  }
  if (valueCase_ == 2) {
    com.google.protobuf.GeneratedMessageV3.writeString(output, 2, value_);
  }
  if (((bitField0_ & 0x00000001) != 0)) {
    com.google.protobuf.GeneratedMessageV3.writeString(output, 3, denotation_);
  }
  getUnknownFields().writeTo(output);
}
// Computes (and caches in memoizedSize) the serialized byte length.
@java.lang.Override
public int getSerializedSize() {
  int size = memoizedSize;
  if (size != -1) return size;
  size = 0;
  if (valueCase_ == 1) {
    size += com.google.protobuf.CodedOutputStream
        .computeInt64Size(
            1, (long)((java.lang.Long) value_));
  }
  if (valueCase_ == 2) {
    size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, value_);
  }
  if (((bitField0_ & 0x00000001) != 0)) {
    size += com.google.protobuf.GeneratedMessageV3.computeStringSize(3, denotation_);
  }
  size += getUnknownFields().getSerializedSize();
  memoizedSize = size;
  return size;
}
// Value equality: denotation presence+content, active oneof case+payload,
// and unknown fields must all match.
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
  if (obj == this) {
    return true;
  }
  if (!(obj instanceof onnx.Onnx.TensorShapeProto.Dimension)) {
    return super.equals(obj);
  }
  onnx.Onnx.TensorShapeProto.Dimension other = (onnx.Onnx.TensorShapeProto.Dimension) obj;
  if (hasDenotation() != other.hasDenotation()) return false;
  if (hasDenotation()) {
    if (!getDenotation()
        .equals(other.getDenotation())) return false;
  }
  if (!getValueCase().equals(other.getValueCase())) return false;
  switch (valueCase_) {
    case 1:
      if (getDimValue()
          != other.getDimValue()) return false;
      break;
    case 2:
      if (!getDimParam()
          .equals(other.getDimParam())) return false;
      break;
    case 0:
    default:
  }
  if (!getUnknownFields().equals(other.getUnknownFields())) return false;
  return true;
}
// Hash mixes descriptor, set fields (keyed by field number), and unknown
// fields; memoized since messages are immutable.
@java.lang.Override
public int hashCode() {
  if (memoizedHashCode != 0) {
    return memoizedHashCode;
  }
  int hash = 41;
  hash = (19 * hash) + getDescriptor().hashCode();
  if (hasDenotation()) {
    hash = (37 * hash) + DENOTATION_FIELD_NUMBER;
    hash = (53 * hash) + getDenotation().hashCode();
  }
  switch (valueCase_) {
    case 1:
      hash = (37 * hash) + DIM_VALUE_FIELD_NUMBER;
      hash = (53 * hash) + com.google.protobuf.Internal.hashLong(
          getDimValue());
      break;
    case 2:
      hash = (37 * hash) + DIM_PARAM_FIELD_NUMBER;
      hash = (53 * hash) + getDimParam().hashCode();
      break;
    case 0:
    default:
  }
  hash = (29 * hash) + getUnknownFields().hashCode();
  memoizedHashCode = hash;
  return hash;
}
// Standard generated parseFrom/parseDelimitedFrom overloads; all delegate to
// PARSER (directly for in-memory inputs, via GeneratedMessageV3 helpers for
// streams so IOExceptions propagate unwrapped).
public static onnx.Onnx.TensorShapeProto.Dimension parseFrom(
    java.nio.ByteBuffer data)
    throws com.google.protobuf.InvalidProtocolBufferException {
  return PARSER.parseFrom(data);
}
public static onnx.Onnx.TensorShapeProto.Dimension parseFrom(
    java.nio.ByteBuffer data,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws com.google.protobuf.InvalidProtocolBufferException {
  return PARSER.parseFrom(data, extensionRegistry);
}
public static onnx.Onnx.TensorShapeProto.Dimension parseFrom(
    com.google.protobuf.ByteString data)
    throws com.google.protobuf.InvalidProtocolBufferException {
  return PARSER.parseFrom(data);
}
public static onnx.Onnx.TensorShapeProto.Dimension parseFrom(
    com.google.protobuf.ByteString data,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws com.google.protobuf.InvalidProtocolBufferException {
  return PARSER.parseFrom(data, extensionRegistry);
}
public static onnx.Onnx.TensorShapeProto.Dimension parseFrom(byte[] data)
    throws com.google.protobuf.InvalidProtocolBufferException {
  return PARSER.parseFrom(data);
}
public static onnx.Onnx.TensorShapeProto.Dimension parseFrom(
    byte[] data,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws com.google.protobuf.InvalidProtocolBufferException {
  return PARSER.parseFrom(data, extensionRegistry);
}
public static onnx.Onnx.TensorShapeProto.Dimension parseFrom(java.io.InputStream input)
    throws java.io.IOException {
  return com.google.protobuf.GeneratedMessageV3
      .parseWithIOException(PARSER, input);
}
public static onnx.Onnx.TensorShapeProto.Dimension parseFrom(
    java.io.InputStream input,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws java.io.IOException {
  return com.google.protobuf.GeneratedMessageV3
      .parseWithIOException(PARSER, input, extensionRegistry);
}
public static onnx.Onnx.TensorShapeProto.Dimension parseDelimitedFrom(java.io.InputStream input)
    throws java.io.IOException {
  return com.google.protobuf.GeneratedMessageV3
      .parseDelimitedWithIOException(PARSER, input);
}
public static onnx.Onnx.TensorShapeProto.Dimension parseDelimitedFrom(
    java.io.InputStream input,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws java.io.IOException {
  return com.google.protobuf.GeneratedMessageV3
      .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
public static onnx.Onnx.TensorShapeProto.Dimension parseFrom(
    com.google.protobuf.CodedInputStream input)
    throws java.io.IOException {
  return com.google.protobuf.GeneratedMessageV3
      .parseWithIOException(PARSER, input);
}
public static onnx.Onnx.TensorShapeProto.Dimension parseFrom(
    com.google.protobuf.CodedInputStream input,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws java.io.IOException {
  return com.google.protobuf.GeneratedMessageV3
      .parseWithIOException(PARSER, input, extensionRegistry);
}
// Builder factories: all builders start from the default instance.
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
  return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(onnx.Onnx.TensorShapeProto.Dimension prototype) {
  return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
  // Avoid a needless merge when this is already the (empty) default.
  return this == DEFAULT_INSTANCE
      ? new Builder() : new Builder().mergeFrom(this);
}
@java.lang.Override
protected Builder newBuilderForType(
    com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
  Builder builder = new Builder(parent);
  return builder;
}
/**
* Protobuf type {@code onnx.TensorShapeProto.Dimension}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessageV3.Builder implements
// @@protoc_insertion_point(builder_implements:onnx.TensorShapeProto.Dimension)
onnx.Onnx.TensorShapeProto.DimensionOrBuilder {
// Reflection support for the Dimension builder (shares the message's
// descriptor and accessor table).
public static final com.google.protobuf.Descriptors.Descriptor
    getDescriptor() {
  return onnx.Onnx.internal_static_onnx_TensorShapeProto_Dimension_descriptor;
}
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
    internalGetFieldAccessorTable() {
  return onnx.Onnx.internal_static_onnx_TensorShapeProto_Dimension_fieldAccessorTable
      .ensureFieldAccessorsInitialized(
          onnx.Onnx.TensorShapeProto.Dimension.class, onnx.Onnx.TensorShapeProto.Dimension.Builder.class);
}
// Construct using onnx.Onnx.TensorShapeProto.Dimension.newBuilder()
private Builder() {
}
// Parent-aware constructor: change notifications propagate to `parent`.
private Builder(
    com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
  super(parent);
}
// Resets every local field and the oneof back to defaults.
@java.lang.Override
public Builder clear() {
  super.clear();
  bitField0_ = 0;
  denotation_ = "";
  valueCase_ = 0;
  value_ = null;
  return this;
}
@java.lang.Override
public com.google.protobuf.Descriptors.Descriptor
    getDescriptorForType() {
  return onnx.Onnx.internal_static_onnx_TensorShapeProto_Dimension_descriptor;
}
@java.lang.Override
public onnx.Onnx.TensorShapeProto.Dimension getDefaultInstanceForType() {
  return onnx.Onnx.TensorShapeProto.Dimension.getDefaultInstance();
}
// build() = buildPartial() plus an initialization check (always passes for
// proto3, which has no required fields).
@java.lang.Override
public onnx.Onnx.TensorShapeProto.Dimension build() {
  onnx.Onnx.TensorShapeProto.Dimension result = buildPartial();
  if (!result.isInitialized()) {
    throw newUninitializedMessageException(result);
  }
  return result;
}
@java.lang.Override
public onnx.Onnx.TensorShapeProto.Dimension buildPartial() {
  onnx.Onnx.TensorShapeProto.Dimension result = new onnx.Onnx.TensorShapeProto.Dimension(this);
  if (bitField0_ != 0) { buildPartial0(result); }
  buildPartialOneofs(result);
  onBuilt();
  return result;
}
// Copies the optional `denotation` field; builder has-bit 0x04 maps to
// message has-bit 0x01.
private void buildPartial0(onnx.Onnx.TensorShapeProto.Dimension result) {
  int from_bitField0_ = bitField0_;
  int to_bitField0_ = 0;
  if (((from_bitField0_ & 0x00000004) != 0)) {
    result.denotation_ = denotation_;
    to_bitField0_ |= 0x00000001;
  }
  result.bitField0_ |= to_bitField0_;
}
// Transfers the `value` oneof (case tag plus payload) into the message.
private void buildPartialOneofs(onnx.Onnx.TensorShapeProto.Dimension result) {
  result.valueCase_ = valueCase_;
  result.value_ = this.value_;
}
// Boilerplate overrides that narrow the reflective mutators' return type to
// this Builder for fluent chaining.
@java.lang.Override
public Builder clone() {
  return super.clone();
}
@java.lang.Override
public Builder setField(
    com.google.protobuf.Descriptors.FieldDescriptor field,
    java.lang.Object value) {
  return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
    com.google.protobuf.Descriptors.FieldDescriptor field) {
  return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
    com.google.protobuf.Descriptors.OneofDescriptor oneof) {
  return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
    com.google.protobuf.Descriptors.FieldDescriptor field,
    int index, java.lang.Object value) {
  return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
    com.google.protobuf.Descriptors.FieldDescriptor field,
    java.lang.Object value) {
  return super.addRepeatedField(field, value);
}
// Dispatch: use the typed merge for Dimension, else the reflective merge.
@java.lang.Override
public Builder mergeFrom(com.google.protobuf.Message other) {
  if (other instanceof onnx.Onnx.TensorShapeProto.Dimension) {
    return mergeFrom((onnx.Onnx.TensorShapeProto.Dimension)other);
  } else {
    super.mergeFrom(other);
    return this;
  }
}
public Builder mergeFrom(onnx.Onnx.TensorShapeProto.Dimension other) {
if (other == onnx.Onnx.TensorShapeProto.Dimension.getDefaultInstance()) return this;
if (other.hasDenotation()) {
denotation_ = other.denotation_;
bitField0_ |= 0x00000004;
onChanged();
}
switch (other.getValueCase()) {
case DIM_VALUE: {
setDimValue(other.getDimValue());
break;
}
case DIM_PARAM: {
valueCase_ = 2;
value_ = other.value_;
onChanged();
break;
}
case VALUE_NOT_SET: {
break;
}
}
this.mergeUnknownFields(other.getUnknownFields());
onChanged();
return this;
}
// Dimension has no required fields, so a partially built message is always valid.
@java.lang.Override
public final boolean isInitialized() {
return true;
}
// Parses Dimension fields from the wire format directly into this builder.
// Tag values: 8 = field 1 varint (dim_value), 18 = field 2 length-delimited
// (dim_param), 26 = field 3 length-delimited (denotation).
@java.lang.Override
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
// Tag 0 marks end of input.
done = true;
break;
case 8: {
value_ = input.readInt64();
valueCase_ = 1;
break;
} // case 8
case 18: {
// Stored as ByteString; decoded to String lazily by getDimParam().
com.google.protobuf.ByteString bs = input.readBytes();
valueCase_ = 2;
value_ = bs;
break;
} // case 18
case 26: {
denotation_ = input.readBytes();
bitField0_ |= 0x00000004;
break;
} // case 26
default: {
if (!super.parseUnknownField(input, extensionRegistry, tag)) {
done = true; // was an endgroup tag
}
break;
} // default:
} // switch (tag)
} // while (!done)
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.unwrapIOException();
} finally {
// Notify parent builders even if parsing failed part-way through.
onChanged();
} // finally
return this;
}
// Discriminator for the "value" oneof: 0 = not set, 1 = dim_value, 2 = dim_param.
private int valueCase_ = 0;
// Oneof payload: a Long when valueCase_ == 1, a String or (undecoded)
// ByteString when valueCase_ == 2, null otherwise.
private java.lang.Object value_;
/**
 * Returns which field of the "value" oneof is currently set on this builder.
 * @return The enum constant for the set case, or VALUE_NOT_SET.
 */
public ValueCase
getValueCase() {
return ValueCase.forNumber(
valueCase_);
}
/**
 * Clears the "value" oneof entirely (both dim_value and dim_param).
 * @return This builder for chaining.
 */
public Builder clearValue() {
valueCase_ = 0;
value_ = null;
onChanged();
return this;
}
// Presence bits for non-oneof optional fields; bit 0x00000004 tracks `denotation`.
private int bitField0_;
/**
 * Whether the "value" oneof currently holds dim_value.
 *
 * int64 dim_value = 1;
 * @return Whether the dimValue field is set.
 */
public boolean hasDimValue() {
return valueCase_ == 1;
}
/**
 * Returns dim_value when it is the set oneof case, otherwise the
 * proto3 default of 0.
 *
 * int64 dim_value = 1;
 * @return The dimValue.
 */
public long getDimValue() {
// The oneof payload is boxed as a Long whenever valueCase_ == 1.
return valueCase_ == 1 ? (java.lang.Long) value_ : 0L;
}
/**
 * Sets dim_value, switching the "value" oneof to case 1 and discarding
 * any previously set dim_param.
 *
 * int64 dim_value = 1;
 * @param value The dimValue to set.
 * @return This builder for chaining.
 */
public Builder setDimValue(long value) {
valueCase_ = 1;
value_ = value;
onChanged();
return this;
}
/**
 * Clears dim_value, but only if it is the currently set oneof case;
 * a set dim_param is left untouched.
 *
 * int64 dim_value = 1;
 * @return This builder for chaining.
 */
public Builder clearDimValue() {
if (valueCase_ == 1) {
valueCase_ = 0;
value_ = null;
onChanged();
}
return this;
}
/**
 * Whether the "value" oneof currently holds dim_param.
 *
 * namespace Shape
 *
 *
 * string dim_param = 2;
 * @return Whether the dimParam field is set.
 */
@java.lang.Override
public boolean hasDimParam() {
return valueCase_ == 2;
}
/**
 * Returns dim_param as a String, lazily decoding the ByteString that the
 * parser stored. The decoded String is cached back into value_ only when
 * the bytes are valid UTF-8, so invalid input is re-decoded (with
 * replacement characters) on every call rather than silently canonicalized.
 *
 * namespace Shape
 *
 *
 * string dim_param = 2;
 * @return The dimParam, or "" if dim_param is not the set oneof case.
 */
@java.lang.Override
public java.lang.String getDimParam() {
java.lang.Object ref = "";
if (valueCase_ == 2) {
ref = value_;
}
if (!(ref instanceof java.lang.String)) {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
// Re-check the case before caching: guards against the default "" path.
if (valueCase_ == 2) {
if (bs.isValidUtf8()) {
value_ = s;
}
}
return s;
} else {
return (java.lang.String) ref;
}
}
/**
 * Returns dim_param as raw bytes, lazily encoding a cached String to UTF-8
 * and caching the ByteString back into value_ for subsequent calls.
 *
 * namespace Shape
 *
 *
 * string dim_param = 2;
 * @return The bytes for dimParam (empty if dim_param is not set).
 */
@java.lang.Override
public com.google.protobuf.ByteString
getDimParamBytes() {
java.lang.Object ref = "";
if (valueCase_ == 2) {
ref = value_;
}
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
// Only cache when dim_param is genuinely set; the "" default is not stored.
if (valueCase_ == 2) {
value_ = b;
}
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
 * Sets dim_param, switching the "value" oneof to case 2 and discarding
 * any previously set dim_value.
 *
 * namespace Shape
 *
 *
 * string dim_param = 2;
 * @param value The dimParam to set.
 * @return This builder for chaining.
 * @throws NullPointerException if value is null.
 */
public Builder setDimParam(
java.lang.String value) {
if (value == null) { throw new NullPointerException(); }
valueCase_ = 2;
value_ = value;
onChanged();
return this;
}
/**
 * Clears dim_param, but only if it is the currently set oneof case;
 * a set dim_value is left untouched.
 *
 * namespace Shape
 *
 *
 * string dim_param = 2;
 * @return This builder for chaining.
 */
public Builder clearDimParam() {
if (valueCase_ == 2) {
valueCase_ = 0;
value_ = null;
onChanged();
}
return this;
}
/**
 * Sets dim_param from raw bytes, switching the "value" oneof to case 2.
 * The bytes are stored as-is; UTF-8 validity is not checked here.
 *
 * namespace Shape
 *
 *
 * string dim_param = 2;
 * @param value The bytes for dimParam to set.
 * @return This builder for chaining.
 * @throws NullPointerException if value is null.
 */
public Builder setDimParamBytes(
com.google.protobuf.ByteString value) {
if (value == null) { throw new NullPointerException(); }
valueCase_ = 2;
value_ = value;
onChanged();
return this;
}
// Backing store for `denotation`: a String once decoded, or a ByteString
// straight from the parser; presence is tracked by bitField0_ & 0x00000004.
private java.lang.Object denotation_ = "";
/**
 *
 * Standard denotation can optionally be used to denote tensor
 * dimensions with standard semantic descriptions to ensure
 * that operations are applied to the correct axis of a tensor.
 * Refer to https://github.com/onnx/onnx/blob/main/docs/DimensionDenotation.md#denotation-definition
 * for pre-defined dimension denotations.
 *
 *
 * optional string denotation = 3;
 * @return Whether the denotation field is set.
 */
public boolean hasDenotation() {
return ((bitField0_ & 0x00000004) != 0);
}
/**
 * Returns `denotation` as a String, lazily decoding and caching the
 * ByteString stored by the parser.
 *
 * NOTE(review): this accessor was missing from this copy of the generated
 * file (its javadoc had been fused onto getDenotationBytes below); it is
 * restored here because the Builder must implement
 * DimensionOrBuilder.getDenotation().
 *
 * Standard denotation can optionally be used to denote tensor
 * dimensions with standard semantic descriptions to ensure
 * that operations are applied to the correct axis of a tensor.
 * Refer to https://github.com/onnx/onnx/blob/main/docs/DimensionDenotation.md#denotation-definition
 * for pre-defined dimension denotations.
 *
 *
 * optional string denotation = 3;
 * @return The denotation.
 */
public java.lang.String getDenotation() {
java.lang.Object ref = denotation_;
if (!(ref instanceof java.lang.String)) {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
// Builders cache the decoded String unconditionally.
denotation_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
 * Returns `denotation` as raw bytes, lazily encoding a cached String to
 * UTF-8 and caching the ByteString for subsequent calls.
 *
 * Standard denotation can optionally be used to denote tensor
 * dimensions with standard semantic descriptions to ensure
 * that operations are applied to the correct axis of a tensor.
 * Refer to https://github.com/onnx/onnx/blob/main/docs/DimensionDenotation.md#denotation-definition
 * for pre-defined dimension denotations.
 *
 *
 * optional string denotation = 3;
 * @return The bytes for denotation.
 */
public com.google.protobuf.ByteString
getDenotationBytes() {
java.lang.Object ref = denotation_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
denotation_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
 * Sets `denotation` and marks its presence bit.
 *
 * Standard denotation can optionally be used to denote tensor
 * dimensions with standard semantic descriptions to ensure
 * that operations are applied to the correct axis of a tensor.
 * Refer to https://github.com/onnx/onnx/blob/main/docs/DimensionDenotation.md#denotation-definition
 * for pre-defined dimension denotations.
 *
 *
 * optional string denotation = 3;
 * @param value The denotation to set.
 * @return This builder for chaining.
 * @throws NullPointerException if value is null.
 */
public Builder setDenotation(
java.lang.String value) {
if (value == null) { throw new NullPointerException(); }
denotation_ = value;
bitField0_ |= 0x00000004;
onChanged();
return this;
}
/**
 * Clears `denotation`, resetting it to the default instance's value and
 * clearing its presence bit.
 *
 * NOTE(review): this method was missing from this copy of the generated
 * file (its javadoc had been fused onto setDenotationBytes below); it is
 * restored here to match the standard generated Builder surface.
 *
 * Standard denotation can optionally be used to denote tensor
 * dimensions with standard semantic descriptions to ensure
 * that operations are applied to the correct axis of a tensor.
 * Refer to https://github.com/onnx/onnx/blob/main/docs/DimensionDenotation.md#denotation-definition
 * for pre-defined dimension denotations.
 *
 *
 * optional string denotation = 3;
 * @return This builder for chaining.
 */
public Builder clearDenotation() {
denotation_ = getDefaultInstance().getDenotation();
bitField0_ = (bitField0_ & ~0x00000004);
onChanged();
return this;
}
/**
 * Sets `denotation` from raw bytes and marks its presence bit. The bytes
 * are stored as-is; UTF-8 validity is not checked here.
 *
 * Standard denotation can optionally be used to denote tensor
 * dimensions with standard semantic descriptions to ensure
 * that operations are applied to the correct axis of a tensor.
 * Refer to https://github.com/onnx/onnx/blob/main/docs/DimensionDenotation.md#denotation-definition
 * for pre-defined dimension denotations.
 *
 *
 * optional string denotation = 3;
 * @param value The bytes for denotation to set.
 * @return This builder for chaining.
 * @throws NullPointerException if value is null.
 */
public Builder setDenotationBytes(
com.google.protobuf.ByteString value) {
if (value == null) { throw new NullPointerException(); }
denotation_ = value;
bitField0_ |= 0x00000004;
onChanged();
return this;
}
// Generated boilerplate: replaces the unknown-field set, delegating to the base builder.
@java.lang.Override
public final Builder setUnknownFields(
final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
// Generated boilerplate: merges unknown fields, delegating to the base builder.
@java.lang.Override
public final Builder mergeUnknownFields(
final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:onnx.TensorShapeProto.Dimension)
}
// @@protoc_insertion_point(class_scope:onnx.TensorShapeProto.Dimension)
// Singleton default instance: all fields at their default values, shared by
// every caller that needs an "empty" Dimension.
private static final onnx.Onnx.TensorShapeProto.Dimension DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new onnx.Onnx.TensorShapeProto.Dimension();
}
public static onnx.Onnx.TensorShapeProto.Dimension getDefaultInstance() {
return DEFAULT_INSTANCE;
}
// Wire-format parser for Dimension. Deprecated in generated code in favor of
// parser(). NOTE(review): the generic type arguments <Dimension> had been
// stripped from this copy of the file (raw Parser/AbstractParser); restored
// here to match protoc output and avoid raw-type use.
@java.lang.Deprecated public static final com.google.protobuf.Parser<Dimension>
PARSER = new com.google.protobuf.AbstractParser<Dimension>() {
@java.lang.Override
public Dimension parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
// Parse via a builder; on failure attach the partially built message
// to the exception so callers can inspect what was read.
Builder builder = newBuilder();
try {
builder.mergeFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(builder.buildPartial());
} catch (com.google.protobuf.UninitializedMessageException e) {
throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(e)
.setUnfinishedMessage(builder.buildPartial());
}
return builder.buildPartial();
}
};
// Preferred accessor for the Dimension parser. NOTE(review): the generic
// return type <Dimension> had been stripped from this copy of the file;
// restored to match protoc output.
public static com.google.protobuf.Parser<Dimension> parser() {
return PARSER;
}
@java.lang.Override
public com.google.protobuf.Parser