
// toco.TocoFlagsOuterClass — Maven / Gradle / Ivy artifact listing header
// ("The newest version!") — kept as a comment so the file remains valid Java.
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: tensorflow/contrib/lite/toco/toco_flags.proto
package toco;
public final class TocoFlagsOuterClass {
// Private constructor: this outer class is a namespace-only holder for the
// generated types and constants and is never instantiated.
private TocoFlagsOuterClass() {}
/**
 * Registers all proto2 extensions declared in this file into the given
 * registry. This .proto declares no extensions, so the body is intentionally
 * empty; the method exists because the protobuf compiler always emits it.
 */
public static void registerAllExtensions(
com.google.protobuf.ExtensionRegistryLite registry) {
}
/**
 * Registers all proto2 extensions declared in this file into the given full
 * (non-lite) registry. Delegates to the lite overload; the widened local
 * variable below forces overload resolution to pick the lite variant rather
 * than recursing into this method.
 */
public static void registerAllExtensions(
    com.google.protobuf.ExtensionRegistry registry) {
  com.google.protobuf.ExtensionRegistryLite liteRegistry = registry;
  registerAllExtensions(liteRegistry);
}
/**
 * <pre>
 * Supported I/O file formats. Some formats may be input-only or output-only.
 * </pre>
 *
 * Protobuf enum {@code toco.FileFormat}
 */
public enum FileFormat
    implements com.google.protobuf.ProtocolMessageEnum {
  /**
   * <code>FILE_FORMAT_UNKNOWN = 0;</code>
   */
  FILE_FORMAT_UNKNOWN(0),
  /**
   * <pre>
   * GraphDef, third_party/tensorflow/core/framework/graph.proto
   * </pre>
   *
   * <code>TENSORFLOW_GRAPHDEF = 1;</code>
   */
  TENSORFLOW_GRAPHDEF(1),
  /**
   * <pre>
   * Tensorflow's mobile inference model.
   * third_party/tensorflow/contrib/tflite/schema.fbs
   * </pre>
   *
   * <code>TFLITE = 2;</code>
   */
  TFLITE(2),
  /**
   * <pre>
   * GraphViz
   * Export-only.
   * </pre>
   *
   * <code>GRAPHVIZ_DOT = 3;</code>
   */
  GRAPHVIZ_DOT(3),
  ;

  /**
   * <code>FILE_FORMAT_UNKNOWN = 0;</code>
   */
  public static final int FILE_FORMAT_UNKNOWN_VALUE = 0;
  /**
   * <pre>
   * GraphDef, third_party/tensorflow/core/framework/graph.proto
   * </pre>
   *
   * <code>TENSORFLOW_GRAPHDEF = 1;</code>
   */
  public static final int TENSORFLOW_GRAPHDEF_VALUE = 1;
  /**
   * <pre>
   * Tensorflow's mobile inference model.
   * third_party/tensorflow/contrib/tflite/schema.fbs
   * </pre>
   *
   * <code>TFLITE = 2;</code>
   */
  public static final int TFLITE_VALUE = 2;
  /**
   * <pre>
   * GraphViz
   * Export-only.
   * </pre>
   *
   * <code>GRAPHVIZ_DOT = 3;</code>
   */
  public static final int GRAPHVIZ_DOT_VALUE = 3;

  /** Returns the numeric wire value of this enum constant. */
  public final int getNumber() {
    return value;
  }

  /**
   * @deprecated Use {@link #forNumber(int)} instead.
   */
  @java.lang.Deprecated
  public static FileFormat valueOf(int value) {
    return forNumber(value);
  }

  /**
   * Maps a numeric wire value to its enum constant.
   *
   * @return the matching constant, or {@code null} if the value is
   *     unrecognized.
   */
  public static FileFormat forNumber(int value) {
    switch (value) {
      case 0: return FILE_FORMAT_UNKNOWN;
      case 1: return TENSORFLOW_GRAPHDEF;
      case 2: return TFLITE;
      case 3: return GRAPHVIZ_DOT;
      default: return null;
    }
  }

  // FIX: restored the <FileFormat> type arguments on EnumLiteMap below; they
  // had been stripped (likely eaten as HTML tags during extraction), leaving
  // raw types and unchecked-assignment warnings.
  public static com.google.protobuf.Internal.EnumLiteMap<FileFormat>
      internalGetValueMap() {
    return internalValueMap;
  }
  private static final com.google.protobuf.Internal.EnumLiteMap<
      FileFormat> internalValueMap =
          new com.google.protobuf.Internal.EnumLiteMap<FileFormat>() {
            public FileFormat findValueByNumber(int number) {
              return FileFormat.forNumber(number);
            }
          };

  /** Returns the descriptor of this particular enum value. */
  public final com.google.protobuf.Descriptors.EnumValueDescriptor
      getValueDescriptor() {
    return getDescriptor().getValues().get(ordinal());
  }
  public final com.google.protobuf.Descriptors.EnumDescriptor
      getDescriptorForType() {
    return getDescriptor();
  }
  public static final com.google.protobuf.Descriptors.EnumDescriptor
      getDescriptor() {
    return toco.TocoFlagsOuterClass.getDescriptor().getEnumTypes().get(0);
  }

  private static final FileFormat[] VALUES = values();

  /**
   * Maps an {@code EnumValueDescriptor} to its enum constant.
   *
   * @throws java.lang.IllegalArgumentException if the descriptor does not
   *     belong to this enum type.
   */
  public static FileFormat valueOf(
      com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
    if (desc.getType() != getDescriptor()) {
      throw new java.lang.IllegalArgumentException(
          "EnumValueDescriptor is not for this type.");
    }
    return VALUES[desc.getIndex()];
  }

  // Numeric wire value of this constant (distinct from ordinal()).
  private final int value;

  private FileFormat(int value) {
    this.value = value;
  }

  // @@protoc_insertion_point(enum_scope:toco.FileFormat)
}
/**
 * Accessor interface for {@code toco.TocoFlags} messages, implemented by both
 * the immutable {@code TocoFlags} message and its {@code Builder}. For every
 * optional proto2 field there is a {@code hasXxx()} presence check and a
 * {@code getXxx()} value accessor (string fields additionally expose a
 * {@code ByteString} accessor).
 */
public interface TocoFlagsOrBuilder extends
// @@protoc_insertion_point(interface_extends:toco.TocoFlags)
com.google.protobuf.MessageOrBuilder {
/**
*
* Input file format
*
*
* optional .toco.FileFormat input_format = 1;
*/
boolean hasInputFormat();
/**
*
* Input file format
*
*
* optional .toco.FileFormat input_format = 1;
*/
toco.TocoFlagsOuterClass.FileFormat getInputFormat();
/**
*
* Output file format
*
*
* optional .toco.FileFormat output_format = 2;
*/
boolean hasOutputFormat();
/**
*
* Output file format
*
*
* optional .toco.FileFormat output_format = 2;
*/
toco.TocoFlagsOuterClass.FileFormat getOutputFormat();
/**
*
* Similar to inference_type, but allows to control specifically the
* quantization of input arrays, separately from other arrays.
* If not set, then the value of inference_type is implicitly used, i.e.
* by default input arrays are quantized like other arrays.
* Like inference_type, this only affects real-number arrays. By "real-number"
* we mean float arrays, and quantized arrays. This excludes plain
* integer arrays, strings arrays, and every other data type.
* The typical use for this flag is for vision models taking a bitmap
* as input, typically with uint8 channels, yet still requiring floating-point
* inference. For such image models, the uint8 input is quantized, i.e.
* the uint8 values are interpreted as real numbers, and the quantization
* parameters used for such input arrays are their mean_value, std_value
* parameters.
*
*
* optional .toco.IODataType inference_input_type = 11;
*/
boolean hasInferenceInputType();
/**
*
* Similar to inference_type, but allows to control specifically the
* quantization of input arrays, separately from other arrays.
* If not set, then the value of inference_type is implicitly used, i.e.
* by default input arrays are quantized like other arrays.
* Like inference_type, this only affects real-number arrays. By "real-number"
* we mean float arrays, and quantized arrays. This excludes plain
* integer arrays, strings arrays, and every other data type.
* The typical use for this flag is for vision models taking a bitmap
* as input, typically with uint8 channels, yet still requiring floating-point
* inference. For such image models, the uint8 input is quantized, i.e.
* the uint8 values are interpreted as real numbers, and the quantization
* parameters used for such input arrays are their mean_value, std_value
* parameters.
*
*
* optional .toco.IODataType inference_input_type = 11;
*/
toco.Types.IODataType getInferenceInputType();
/**
*
* Sets the type of real-number arrays in the output file, that is, controls
* the representation (quantization) of real numbers in the output file,
* except for input arrays, which are controlled by inference_input_type.
* NOTE: this flag only impacts real-number arrays. By "real-number"
* we mean float arrays, and quantized arrays. This excludes plain
* integer arrays, strings arrays, and every other data type.
* For real-number arrays, the impact of this flag is to allow the output
* file to choose a different real-numbers representation (quantization)
* from what the input file used. For any other types of arrays, changing
* the data type would not make sense.
* Specifically:
* - If FLOAT, then real-numbers arrays will be of type float in
* the output file. If they were quantized in the input file, then
* they get dequantized.
* - If QUANTIZED_UINT8, then real-numbers arrays will be quantized
* as uint8 in the output file. If they were float in the input file,
* then they get quantized.
* - If not set, then all real-numbers arrays retain the same type in the
* output file as they have in the input file.
*
*
* optional .toco.IODataType inference_type = 4;
*/
boolean hasInferenceType();
/**
*
* Sets the type of real-number arrays in the output file, that is, controls
* the representation (quantization) of real numbers in the output file,
* except for input arrays, which are controlled by inference_input_type.
* NOTE: this flag only impacts real-number arrays. By "real-number"
* we mean float arrays, and quantized arrays. This excludes plain
* integer arrays, strings arrays, and every other data type.
* For real-number arrays, the impact of this flag is to allow the output
* file to choose a different real-numbers representation (quantization)
* from what the input file used. For any other types of arrays, changing
* the data type would not make sense.
* Specifically:
* - If FLOAT, then real-numbers arrays will be of type float in
* the output file. If they were quantized in the input file, then
* they get dequantized.
* - If QUANTIZED_UINT8, then real-numbers arrays will be quantized
* as uint8 in the output file. If they were float in the input file,
* then they get quantized.
* - If not set, then all real-numbers arrays retain the same type in the
* output file as they have in the input file.
*
*
* optional .toco.IODataType inference_type = 4;
*/
toco.Types.IODataType getInferenceType();
/**
*
* default_ranges_min and default_ranges_max are helpers to experiment
* with quantization of models. Normally, quantization requires the input
* model to have (min, max) range information for every activations array.
* This is needed in order to know how to quantize arrays and still achieve
* satisfactory accuracy. However, in some circumstances one would just like
* to estimate the performance of quantized inference, without caring about
* accuracy. That is what default_ranges_min and default_ranges_max are for:
* when specified, they will be used as default (min, max) range boundaries
* for all activation arrays that lack (min, max) range information, thus
* allowing for quantization to proceed.
* It should be clear from the above explanation that these parameters are
* for experimentation purposes only and should not be used in production:
* they make it easy to quantize models, but the resulting quantized model
* will be inaccurate.
* These values only apply to arrays quantized with the kUint8 data type.
*
*
* optional float default_ranges_min = 5;
*/
boolean hasDefaultRangesMin();
/**
*
* default_ranges_min and default_ranges_max are helpers to experiment
* with quantization of models. Normally, quantization requires the input
* model to have (min, max) range information for every activations array.
* This is needed in order to know how to quantize arrays and still achieve
* satisfactory accuracy. However, in some circumstances one would just like
* to estimate the performance of quantized inference, without caring about
* accuracy. That is what default_ranges_min and default_ranges_max are for:
* when specified, they will be used as default (min, max) range boundaries
* for all activation arrays that lack (min, max) range information, thus
* allowing for quantization to proceed.
* It should be clear from the above explanation that these parameters are
* for experimentation purposes only and should not be used in production:
* they make it easy to quantize models, but the resulting quantized model
* will be inaccurate.
* These values only apply to arrays quantized with the kUint8 data type.
*
*
* optional float default_ranges_min = 5;
*/
float getDefaultRangesMin();
/**
* optional float default_ranges_max = 6;
*/
boolean hasDefaultRangesMax();
/**
* optional float default_ranges_max = 6;
*/
float getDefaultRangesMax();
/**
*
* Equivalent versions of default_ranges_min/_max for arrays quantized with
* the kInt16 data type.
*
*
* optional float default_int16_ranges_min = 15;
*/
boolean hasDefaultInt16RangesMin();
/**
*
* Equivalent versions of default_ranges_min/_max for arrays quantized with
* the kInt16 data type.
*
*
* optional float default_int16_ranges_min = 15;
*/
float getDefaultInt16RangesMin();
/**
* optional float default_int16_ranges_max = 16;
*/
boolean hasDefaultInt16RangesMax();
/**
* optional float default_int16_ranges_max = 16;
*/
float getDefaultInt16RangesMax();
/**
*
* Ignore and discard FakeQuant nodes. For instance, that can be used to
* generate plain float code without fake-quantization from a quantized
* graph.
*
*
* optional bool drop_fake_quant = 7;
*/
boolean hasDropFakeQuant();
/**
*
* Ignore and discard FakeQuant nodes. For instance, that can be used to
* generate plain float code without fake-quantization from a quantized
* graph.
*
*
* optional bool drop_fake_quant = 7;
*/
boolean getDropFakeQuant();
/**
*
* Normally, FakeQuant nodes must be strict boundaries for graph
* transformations, in order to ensure that quantized inference has the
* exact same arithmetic behavior as quantized training --- which is the
* whole point of quantized training and of FakeQuant nodes in the first
* place. However, that entails subtle requirements on where exactly
* FakeQuant nodes must be placed in the graph. Some quantized graphs
* have FakeQuant nodes at unexpected locations, that prevent graph
* transformations that are necessary in order to generate inference
* code for these graphs. Such graphs should be fixed, but as a
* temporary work-around, setting this reorder_across_fake_quant flag
* allows toco to perform necessary graph transformations on them,
* at the cost of no longer faithfully matching inference and training
* arithmetic.
*
*
* optional bool reorder_across_fake_quant = 8;
*/
boolean hasReorderAcrossFakeQuant();
/**
*
* Normally, FakeQuant nodes must be strict boundaries for graph
* transformations, in order to ensure that quantized inference has the
* exact same arithmetic behavior as quantized training --- which is the
* whole point of quantized training and of FakeQuant nodes in the first
* place. However, that entails subtle requirements on where exactly
* FakeQuant nodes must be placed in the graph. Some quantized graphs
* have FakeQuant nodes at unexpected locations, that prevent graph
* transformations that are necessary in order to generate inference
* code for these graphs. Such graphs should be fixed, but as a
* temporary work-around, setting this reorder_across_fake_quant flag
* allows toco to perform necessary graph transformations on them,
* at the cost of no longer faithfully matching inference and training
* arithmetic.
*
*
* optional bool reorder_across_fake_quant = 8;
*/
boolean getReorderAcrossFakeQuant();
/**
*
* If true, allow TOCO to create TF Lite Custom operators for all the
* unsupported Tensorflow ops.
*
*
* optional bool allow_custom_ops = 10;
*/
boolean hasAllowCustomOps();
/**
*
* If true, allow TOCO to create TF Lite Custom operators for all the
* unsupported Tensorflow ops.
*
*
* optional bool allow_custom_ops = 10;
*/
boolean getAllowCustomOps();
/**
*
* Applies only to the case when the input format is TENSORFLOW_GRAPHDEF.
* If true, then control dependencies will be immediately dropped during
* import.
* If not set, the default behavior is as follows:
* - Default to false if the output format is TENSORFLOW_GRAPHDEF.
* - Default to true in all other cases.
*
*
* optional bool drop_control_dependency = 12;
*/
boolean hasDropControlDependency();
/**
*
* Applies only to the case when the input format is TENSORFLOW_GRAPHDEF.
* If true, then control dependencies will be immediately dropped during
* import.
* If not set, the default behavior is as follows:
* - Default to false if the output format is TENSORFLOW_GRAPHDEF.
* - Default to true in all other cases.
*
*
* optional bool drop_control_dependency = 12;
*/
boolean getDropControlDependency();
/**
*
* Disables transformations that fuse subgraphs such as known LSTMs (not all
* LSTMs are identified).
*
*
* optional bool debug_disable_recurrent_cell_fusion = 13;
*/
boolean hasDebugDisableRecurrentCellFusion();
/**
*
* Disables transformations that fuse subgraphs such as known LSTMs (not all
* LSTMs are identified).
*
*
* optional bool debug_disable_recurrent_cell_fusion = 13;
*/
boolean getDebugDisableRecurrentCellFusion();
/**
*
* Uses the FakeQuantWithMinMaxArgs.num_bits attribute to adjust quantized
* array data types throughout the graph. The graph must be properly annotated
* with FakeQuant* ops on at least the edges and may contain additional ops on
* the interior of the graph to widen/narrow as desired.
* Input and output array data types may change because of this propagation
* and users must be sure to query the final data_type values.
*
*
* optional bool propagate_fake_quant_num_bits = 14;
*/
boolean hasPropagateFakeQuantNumBits();
/**
*
* Uses the FakeQuantWithMinMaxArgs.num_bits attribute to adjust quantized
* array data types throughout the graph. The graph must be properly annotated
* with FakeQuant* ops on at least the edges and may contain additional ops on
* the interior of the graph to widen/narrow as desired.
* Input and output array data types may change because of this propagation
* and users must be sure to query the final data_type values.
*
*
* optional bool propagate_fake_quant_num_bits = 14;
*/
boolean getPropagateFakeQuantNumBits();
/**
*
* Some fast uint8 GEMM kernels require uint8 weights to avoid the value 0.
* This flag allows nudging them to 1 to allow proceeding, with moderate
* inaccuracy.
*
*
* optional bool allow_nudging_weights_to_use_fast_gemm_kernel = 17;
*/
boolean hasAllowNudgingWeightsToUseFastGemmKernel();
/**
*
* Some fast uint8 GEMM kernels require uint8 weights to avoid the value 0.
* This flag allows nudging them to 1 to allow proceeding, with moderate
* inaccuracy.
*
*
* optional bool allow_nudging_weights_to_use_fast_gemm_kernel = 17;
*/
boolean getAllowNudgingWeightsToUseFastGemmKernel();
/**
*
* Minimum size of constant arrays to deduplicate; arrays smaller will not be
* deduplicated.
*
*
* optional int64 dedupe_array_min_size_bytes = 18 [default = 64];
*/
boolean hasDedupeArrayMinSizeBytes();
/**
*
* Minimum size of constant arrays to deduplicate; arrays smaller will not be
* deduplicated.
*
*
* optional int64 dedupe_array_min_size_bytes = 18 [default = 64];
*/
long getDedupeArrayMinSizeBytes();
/**
*
* Split the LSTM inputs from 5 tensors to 18 tensors for TFLite.
* Ignored if the output format is not TFLite.
*
*
* optional bool split_tflite_lstm_inputs = 19 [default = true];
*/
boolean hasSplitTfliteLstmInputs();
/**
*
* Split the LSTM inputs from 5 tensors to 18 tensors for TFLite.
* Ignored if the output format is not TFLite.
*
*
* optional bool split_tflite_lstm_inputs = 19 [default = true];
*/
boolean getSplitTfliteLstmInputs();
/**
*
* Store weights as quantized weights followed by dequantize operations.
* Computation is still done in float, but reduces model size (at the cost of
* accuracy and latency).
* DEPRECATED: Please use post_training_quantize instead.
*
*
* optional bool quantize_weights = 20 [default = false];
*/
boolean hasQuantizeWeights();
/**
*
* Store weights as quantized weights followed by dequantize operations.
* Computation is still done in float, but reduces model size (at the cost of
* accuracy and latency).
* DEPRECATED: Please use post_training_quantize instead.
*
*
* optional bool quantize_weights = 20 [default = false];
*/
boolean getQuantizeWeights();
/**
*
* Full filepath of folder to dump the graphs at various stages of processing
* GraphViz .dot files. Preferred over --output_format=GRAPHVIZ_DOT in order
* to keep the requirements of the output file.
*
*
* optional string dump_graphviz_dir = 24;
*/
boolean hasDumpGraphvizDir();
/**
*
* Full filepath of folder to dump the graphs at various stages of processing
* GraphViz .dot files. Preferred over --output_format=GRAPHVIZ_DOT in order
* to keep the requirements of the output file.
*
*
* optional string dump_graphviz_dir = 24;
*/
java.lang.String getDumpGraphvizDir();
/**
*
* Full filepath of folder to dump the graphs at various stages of processing
* GraphViz .dot files. Preferred over --output_format=GRAPHVIZ_DOT in order
* to keep the requirements of the output file.
*
*
* optional string dump_graphviz_dir = 24;
*/
com.google.protobuf.ByteString
getDumpGraphvizDirBytes();
/**
*
* Boolean indicating whether to dump the graph after every graph
* transformation.
*
*
* optional bool dump_graphviz_include_video = 25;
*/
boolean hasDumpGraphvizIncludeVideo();
/**
*
* Boolean indicating whether to dump the graph after every graph
* transformation.
*
*
* optional bool dump_graphviz_include_video = 25;
*/
boolean getDumpGraphvizIncludeVideo();
/**
*
* Boolean indicating whether to quantize the weights of the converted float
* model. Model size will be reduced and there will be latency improvements
* (at the cost of accuracy).
*
*
* optional bool post_training_quantize = 26 [default = false];
*/
boolean hasPostTrainingQuantize();
/**
*
* Boolean indicating whether to quantize the weights of the converted float
* model. Model size will be reduced and there will be latency improvements
* (at the cost of accuracy).
*
*
* optional bool post_training_quantize = 26 [default = false];
*/
boolean getPostTrainingQuantize();
/**
*
* When enabled, unsupported ops will be converted to TFLite Flex ops.
* TODO(ycling): Consider to rename the following 2 flags and don't call it
* "Flex".
* `allow_flex_ops` should always be used with `allow_custom_ops`.
* WARNING: Experimental interface, subject to change
*
*
* optional bool allow_flex_ops = 27 [default = false];
*/
boolean hasAllowFlexOps();
/**
*
* When enabled, unsupported ops will be converted to TFLite Flex ops.
* TODO(ycling): Consider to rename the following 2 flags and don't call it
* "Flex".
* `allow_flex_ops` should always be used with `allow_custom_ops`.
* WARNING: Experimental interface, subject to change
*
*
* optional bool allow_flex_ops = 27 [default = false];
*/
boolean getAllowFlexOps();
/**
*
* When enabled, all TensorFlow ops will be converted to TFLite Flex
* ops directly. This will force `allow_flex_ops` to true.
* `force_flex_ops` should always be used with `allow_flex_ops`.
* WARNING: Experimental interface, subject to change
*
*
* optional bool force_flex_ops = 28 [default = false];
*/
boolean hasForceFlexOps();
/**
*
* When enabled, all TensorFlow ops will be converted to TFLite Flex
* ops directly. This will force `allow_flex_ops` to true.
* `force_flex_ops` should always be used with `allow_flex_ops`.
* WARNING: Experimental interface, subject to change
*
*
* optional bool force_flex_ops = 28 [default = false];
*/
boolean getForceFlexOps();
}
/**
*
* TocoFlags encodes extra parameters that drive tooling operations, that
* are not normally encoded in model files and in general may not be thought
* of as properties of models, instead describing how models are to be
* processed in the context of the present tooling job.
* Next ID to use: 29.
*
*
* Protobuf type {@code toco.TocoFlags}
*/
public static final class TocoFlags extends
com.google.protobuf.GeneratedMessageV3 implements
// @@protoc_insertion_point(message_implements:toco.TocoFlags)
TocoFlagsOrBuilder {
private static final long serialVersionUID = 0L;
// Use TocoFlags.newBuilder() to construct.
// Builder-based constructor; use TocoFlags.newBuilder() to construct.
// FIX: the parameter type read "GeneratedMessageV3.Builder>" — the "<?" of
// the wildcard type argument was stripped (eaten as an HTML tag), which is a
// syntax error. Restored the standard protoc signature "Builder<?>".
private TocoFlags(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
  super(builder);
}
// Default constructor: initializes every field to its proto2 default value.
// Enum-backed int fields start at 0 (the enum number of the UNKNOWN values).
private TocoFlags() {
inputFormat_ = 0;
outputFormat_ = 0;
inferenceInputType_ = 0;
inferenceType_ = 0;
defaultRangesMin_ = 0F;
defaultRangesMax_ = 0F;
defaultInt16RangesMin_ = 0F;
defaultInt16RangesMax_ = 0F;
dropFakeQuant_ = false;
reorderAcrossFakeQuant_ = false;
allowCustomOps_ = false;
dropControlDependency_ = false;
debugDisableRecurrentCellFusion_ = false;
propagateFakeQuantNumBits_ = false;
allowNudgingWeightsToUseFastGemmKernel_ = false;
// The two non-zero defaults below come from [default = ...] options in the
// .proto (see the interface javadoc: fields 18 and 19).
dedupeArrayMinSizeBytes_ = 64L;
splitTfliteLstmInputs_ = true;
quantizeWeights_ = false;
dumpGraphvizDir_ = "";
dumpGraphvizIncludeVideo_ = false;
postTrainingQuantize_ = false;
allowFlexOps_ = false;
forceFlexOps_ = false;
}
// Returns fields that were present on the wire but are not defined in this
// message's schema (preserved per proto2 unknown-field semantics).
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
// Parsing constructor: builds a TocoFlags message by decoding a wire-format
// stream. Each wire tag is (field_number << 3) | wire_type; the switch below
// handles one case per known field. Unrecognized tags — and unrecognized enum
// numbers — are preserved in unknownFields rather than dropped (proto2 rule).
// NOTE(review): there is no case for tag 24 (field 3, varint); field 3 is
// evidently not defined in the .proto, so it falls to the default branch.
private TocoFlags(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
this();
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
// Tag 0 marks end of stream / end of message.
done = true;
break;
case 8: { // field 1: input_format (enum, varint)
int rawValue = input.readEnum();
@SuppressWarnings("deprecation")
toco.TocoFlagsOuterClass.FileFormat value = toco.TocoFlagsOuterClass.FileFormat.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(1, rawValue);
} else {
bitField0_ |= 0x00000001;
inputFormat_ = rawValue;
}
break;
}
case 16: { // field 2: output_format (enum, varint)
int rawValue = input.readEnum();
@SuppressWarnings("deprecation")
toco.TocoFlagsOuterClass.FileFormat value = toco.TocoFlagsOuterClass.FileFormat.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(2, rawValue);
} else {
bitField0_ |= 0x00000002;
outputFormat_ = rawValue;
}
break;
}
case 32: { // field 4: inference_type (enum, varint)
int rawValue = input.readEnum();
@SuppressWarnings("deprecation")
toco.Types.IODataType value = toco.Types.IODataType.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(4, rawValue);
} else {
bitField0_ |= 0x00000008;
inferenceType_ = rawValue;
}
break;
}
case 45: { // field 5: default_ranges_min (float, fixed32)
bitField0_ |= 0x00000010;
defaultRangesMin_ = input.readFloat();
break;
}
case 53: { // field 6: default_ranges_max (float, fixed32)
bitField0_ |= 0x00000020;
defaultRangesMax_ = input.readFloat();
break;
}
case 56: { // field 7: drop_fake_quant (bool)
bitField0_ |= 0x00000100;
dropFakeQuant_ = input.readBool();
break;
}
case 64: { // field 8: reorder_across_fake_quant (bool)
bitField0_ |= 0x00000200;
reorderAcrossFakeQuant_ = input.readBool();
break;
}
case 80: { // field 10: allow_custom_ops (bool)
bitField0_ |= 0x00000400;
allowCustomOps_ = input.readBool();
break;
}
case 88: { // field 11: inference_input_type (enum, varint)
int rawValue = input.readEnum();
@SuppressWarnings("deprecation")
toco.Types.IODataType value = toco.Types.IODataType.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(11, rawValue);
} else {
bitField0_ |= 0x00000004;
inferenceInputType_ = rawValue;
}
break;
}
case 96: { // field 12: drop_control_dependency (bool)
bitField0_ |= 0x00000800;
dropControlDependency_ = input.readBool();
break;
}
case 104: { // field 13: debug_disable_recurrent_cell_fusion (bool)
bitField0_ |= 0x00001000;
debugDisableRecurrentCellFusion_ = input.readBool();
break;
}
case 112: { // field 14: propagate_fake_quant_num_bits (bool)
bitField0_ |= 0x00002000;
propagateFakeQuantNumBits_ = input.readBool();
break;
}
case 125: { // field 15: default_int16_ranges_min (float, fixed32)
bitField0_ |= 0x00000040;
defaultInt16RangesMin_ = input.readFloat();
break;
}
case 133: { // field 16: default_int16_ranges_max (float, fixed32)
bitField0_ |= 0x00000080;
defaultInt16RangesMax_ = input.readFloat();
break;
}
case 136: { // field 17: allow_nudging_weights_to_use_fast_gemm_kernel (bool)
bitField0_ |= 0x00004000;
allowNudgingWeightsToUseFastGemmKernel_ = input.readBool();
break;
}
case 144: { // field 18: dedupe_array_min_size_bytes (int64, varint)
bitField0_ |= 0x00008000;
dedupeArrayMinSizeBytes_ = input.readInt64();
break;
}
case 152: { // field 19: split_tflite_lstm_inputs (bool)
bitField0_ |= 0x00010000;
splitTfliteLstmInputs_ = input.readBool();
break;
}
case 160: { // field 20: quantize_weights (bool)
bitField0_ |= 0x00020000;
quantizeWeights_ = input.readBool();
break;
}
case 194: { // field 24: dump_graphviz_dir (string, length-delimited)
com.google.protobuf.ByteString bs = input.readBytes();
bitField0_ |= 0x00040000;
dumpGraphvizDir_ = bs;
break;
}
case 200: { // field 25: dump_graphviz_include_video (bool)
bitField0_ |= 0x00080000;
dumpGraphvizIncludeVideo_ = input.readBool();
break;
}
case 208: { // field 26: post_training_quantize (bool)
bitField0_ |= 0x00100000;
postTrainingQuantize_ = input.readBool();
break;
}
case 216: { // field 27: allow_flex_ops (bool)
bitField0_ |= 0x00200000;
allowFlexOps_ = input.readBool();
break;
}
case 224: { // field 28: force_flex_ops (bool)
bitField0_ |= 0x00400000;
forceFlexOps_ = input.readBool();
break;
}
default: {
// Unknown tag: stash the data; parseUnknownField returns false at
// end-of-group, which terminates the loop.
if (!parseUnknownField(
input, unknownFields, extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
// Attach the partially-parsed message so callers can inspect it.
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e).setUnfinishedMessage(this);
} finally {
// Always finalize unknown fields, even on failure paths.
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
// Returns the protobuf descriptor for the toco.TocoFlags message type.
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return toco.TocoFlagsOuterClass.internal_static_toco_TocoFlags_descriptor;
}
// Wires the reflective field accessors to the TocoFlags message and Builder
// classes; used internally by the GeneratedMessageV3 reflection machinery.
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return toco.TocoFlagsOuterClass.internal_static_toco_TocoFlags_fieldAccessorTable
.ensureFieldAccessorsInitialized(
toco.TocoFlagsOuterClass.TocoFlags.class, toco.TocoFlagsOuterClass.TocoFlags.Builder.class);
}
// Presence bits for the optional proto2 fields of this message.
private int bitField0_;
public static final int INPUT_FORMAT_FIELD_NUMBER = 1;
// Stored as the raw enum number; resolved to the enum constant on access.
private int inputFormat_;
/**
 * <pre>
 * Input file format
 * </pre>
 *
 * <code>optional .toco.FileFormat input_format = 1;</code>
 *
 * @return whether the input_format field was explicitly set.
 */
public boolean hasInputFormat() {
  return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
 * <pre>
 * Input file format
 * </pre>
 *
 * <code>optional .toco.FileFormat input_format = 1;</code>
 *
 * @return the input_format value, or FILE_FORMAT_UNKNOWN if the stored
 *     number does not map to a known constant.
 */
public toco.TocoFlagsOuterClass.FileFormat getInputFormat() {
  // forNumber(int) is the documented replacement for the deprecated
  // valueOf(int) (see FileFormat), so no deprecation suppression is needed.
  toco.TocoFlagsOuterClass.FileFormat result =
      toco.TocoFlagsOuterClass.FileFormat.forNumber(inputFormat_);
  return result == null ? toco.TocoFlagsOuterClass.FileFormat.FILE_FORMAT_UNKNOWN : result;
}
public static final int OUTPUT_FORMAT_FIELD_NUMBER = 2;
// Stored as the raw enum number; resolved to the enum constant on access.
private int outputFormat_;
/**
 * <pre>
 * Output file format
 * </pre>
 *
 * <code>optional .toco.FileFormat output_format = 2;</code>
 *
 * @return whether the output_format field was explicitly set.
 */
public boolean hasOutputFormat() {
  return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
 * <pre>
 * Output file format
 * </pre>
 *
 * <code>optional .toco.FileFormat output_format = 2;</code>
 *
 * @return the output_format value, or FILE_FORMAT_UNKNOWN if the stored
 *     number does not map to a known constant.
 */
public toco.TocoFlagsOuterClass.FileFormat getOutputFormat() {
  // forNumber(int) replaces the deprecated valueOf(int); same null-to-UNKNOWN
  // mapping as before.
  toco.TocoFlagsOuterClass.FileFormat result =
      toco.TocoFlagsOuterClass.FileFormat.forNumber(outputFormat_);
  return result == null ? toco.TocoFlagsOuterClass.FileFormat.FILE_FORMAT_UNKNOWN : result;
}
public static final int INFERENCE_INPUT_TYPE_FIELD_NUMBER = 11;
// Stored as the raw enum number; resolved to the enum constant on access.
private int inferenceInputType_;
/**
 * <pre>
 * Similar to inference_type, but allows to control specifically the
 * quantization of input arrays, separately from other arrays.
 * If not set, then the value of inference_type is implicitly used, i.e.
 * by default input arrays are quantized like other arrays.
 * Like inference_type, this only affects real-number arrays. By "real-number"
 * we mean float arrays, and quantized arrays. This excludes plain
 * integer arrays, strings arrays, and every other data type.
 * The typical use for this flag is for vision models taking a bitmap
 * as input, typically with uint8 channels, yet still requiring floating-point
 * inference. For such image models, the uint8 input is quantized, i.e.
 * the uint8 values are interpreted as real numbers, and the quantization
 * parameters used for such input arrays are their mean_value, std_value
 * parameters.
 * </pre>
 *
 * <code>optional .toco.IODataType inference_input_type = 11;</code>
 *
 * @return whether the inference_input_type field was explicitly set.
 */
public boolean hasInferenceInputType() {
  return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
 * <pre>
 * Similar to inference_type, but allows to control specifically the
 * quantization of input arrays, separately from other arrays.
 * If not set, then the value of inference_type is implicitly used, i.e.
 * by default input arrays are quantized like other arrays.
 * Like inference_type, this only affects real-number arrays. By "real-number"
 * we mean float arrays, and quantized arrays. This excludes plain
 * integer arrays, strings arrays, and every other data type.
 * The typical use for this flag is for vision models taking a bitmap
 * as input, typically with uint8 channels, yet still requiring floating-point
 * inference. For such image models, the uint8 input is quantized, i.e.
 * the uint8 values are interpreted as real numbers, and the quantization
 * parameters used for such input arrays are their mean_value, std_value
 * parameters.
 * </pre>
 *
 * <code>optional .toco.IODataType inference_input_type = 11;</code>
 *
 * @return the inference_input_type value, or IO_DATA_TYPE_UNKNOWN if the
 *     stored number does not map to a known constant.
 */
public toco.Types.IODataType getInferenceInputType() {
  // forNumber(int) is the non-deprecated equivalent of valueOf(int) on
  // generated protobuf enums (see FileFormat.valueOf's deprecation note).
  toco.Types.IODataType result = toco.Types.IODataType.forNumber(inferenceInputType_);
  return result == null ? toco.Types.IODataType.IO_DATA_TYPE_UNKNOWN : result;
}
public static final int INFERENCE_TYPE_FIELD_NUMBER = 4;
// Stored as the raw enum number; resolved to the enum constant on access.
private int inferenceType_;
/**
 * <pre>
 * Sets the type of real-number arrays in the output file, that is, controls
 * the representation (quantization) of real numbers in the output file,
 * except for input arrays, which are controlled by inference_input_type.
 * NOTE: this flag only impacts real-number arrays. By "real-number"
 * we mean float arrays, and quantized arrays. This excludes plain
 * integer arrays, strings arrays, and every other data type.
 * For real-number arrays, the impact of this flag is to allow the output
 * file to choose a different real-numbers representation (quantization)
 * from what the input file used. For any other types of arrays, changing
 * the data type would not make sense.
 * Specifically:
 * - If FLOAT, then real-numbers arrays will be of type float in
 * the output file. If they were quantized in the input file, then
 * they get dequantized.
 * - If QUANTIZED_UINT8, then real-numbers arrays will be quantized
 * as uint8 in the output file. If they were float in the input file,
 * then they get quantized.
 * - If not set, then all real-numbers arrays retain the same type in the
 * output file as they have in the input file.
 * </pre>
 *
 * <code>optional .toco.IODataType inference_type = 4;</code>
 *
 * @return whether the inference_type field was explicitly set.
 */
public boolean hasInferenceType() {
  return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
 * <pre>
 * Sets the type of real-number arrays in the output file, that is, controls
 * the representation (quantization) of real numbers in the output file,
 * except for input arrays, which are controlled by inference_input_type.
 * NOTE: this flag only impacts real-number arrays. By "real-number"
 * we mean float arrays, and quantized arrays. This excludes plain
 * integer arrays, strings arrays, and every other data type.
 * For real-number arrays, the impact of this flag is to allow the output
 * file to choose a different real-numbers representation (quantization)
 * from what the input file used. For any other types of arrays, changing
 * the data type would not make sense.
 * Specifically:
 * - If FLOAT, then real-numbers arrays will be of type float in
 * the output file. If they were quantized in the input file, then
 * they get dequantized.
 * - If QUANTIZED_UINT8, then real-numbers arrays will be quantized
 * as uint8 in the output file. If they were float in the input file,
 * then they get quantized.
 * - If not set, then all real-numbers arrays retain the same type in the
 * output file as they have in the input file.
 * </pre>
 *
 * <code>optional .toco.IODataType inference_type = 4;</code>
 *
 * @return the inference_type value, or IO_DATA_TYPE_UNKNOWN if the stored
 *     number does not map to a known constant.
 */
public toco.Types.IODataType getInferenceType() {
  // forNumber(int) is the non-deprecated equivalent of valueOf(int) on
  // generated protobuf enums (see FileFormat.valueOf's deprecation note).
  toco.Types.IODataType result = toco.Types.IODataType.forNumber(inferenceType_);
  return result == null ? toco.Types.IODataType.IO_DATA_TYPE_UNKNOWN : result;
}
public static final int DEFAULT_RANGES_MIN_FIELD_NUMBER = 5;
private float defaultRangesMin_;
/**
 * default_ranges_min / default_ranges_max are experimentation helpers for
 * quantization: when set, they supply default (min, max) range boundaries
 * for activation arrays that lack range information, so quantization can
 * proceed. Intended for estimating quantized-inference performance only —
 * the resulting model will be inaccurate; do not use in production.
 * Applies only to arrays quantized with the kUint8 data type.
 *
 * optional float default_ranges_min = 5;
 *
 * @return whether the field was explicitly set.
 */
public boolean hasDefaultRangesMin() {
  return (bitField0_ & 0x00000010) != 0;
}
/**
 * Returns the default minimum range boundary (see {@link #hasDefaultRangesMin}).
 *
 * optional float default_ranges_min = 5;
 */
public float getDefaultRangesMin() {
  return defaultRangesMin_;
}
public static final int DEFAULT_RANGES_MAX_FIELD_NUMBER = 6;
private float defaultRangesMax_;
/**
 * optional float default_ranges_max = 6;
 *
 * @return whether the field was explicitly set.
 */
public boolean hasDefaultRangesMax() {
  return (bitField0_ & 0x00000020) != 0;
}
/**
 * Returns the default maximum range boundary (counterpart of default_ranges_min).
 *
 * optional float default_ranges_max = 6;
 */
public float getDefaultRangesMax() {
  return defaultRangesMax_;
}
public static final int DEFAULT_INT16_RANGES_MIN_FIELD_NUMBER = 15;
private float defaultInt16RangesMin_;
/**
 * Equivalent of default_ranges_min for arrays quantized with the kInt16
 * data type.
 *
 * optional float default_int16_ranges_min = 15;
 *
 * @return whether the field was explicitly set.
 */
public boolean hasDefaultInt16RangesMin() {
  return (bitField0_ & 0x00000040) != 0;
}
/**
 * Returns the default minimum range boundary for kInt16-quantized arrays.
 *
 * optional float default_int16_ranges_min = 15;
 */
public float getDefaultInt16RangesMin() {
  return defaultInt16RangesMin_;
}
public static final int DEFAULT_INT16_RANGES_MAX_FIELD_NUMBER = 16;
private float defaultInt16RangesMax_;
/**
 * optional float default_int16_ranges_max = 16;
 *
 * @return whether the field was explicitly set.
 */
public boolean hasDefaultInt16RangesMax() {
  return (bitField0_ & 0x00000080) != 0;
}
/**
 * Returns the default maximum range boundary for kInt16-quantized arrays.
 *
 * optional float default_int16_ranges_max = 16;
 */
public float getDefaultInt16RangesMax() {
  return defaultInt16RangesMax_;
}
public static final int DROP_FAKE_QUANT_FIELD_NUMBER = 7;
private boolean dropFakeQuant_;
/**
 * Ignore and discard FakeQuant nodes. For instance, that can be used to
 * generate plain float code without fake-quantization from a quantized
 * graph.
 *
 * optional bool drop_fake_quant = 7;
 *
 * @return whether the field was explicitly set.
 */
public boolean hasDropFakeQuant() {
  return (bitField0_ & 0x00000100) != 0;
}
/**
 * Returns whether FakeQuant nodes are ignored and discarded.
 *
 * optional bool drop_fake_quant = 7;
 */
public boolean getDropFakeQuant() {
  return dropFakeQuant_;
}
public static final int REORDER_ACROSS_FAKE_QUANT_FIELD_NUMBER = 8;
private boolean reorderAcrossFakeQuant_;
/**
 * Normally FakeQuant nodes are strict boundaries for graph transformations,
 * so that quantized inference matches quantized-training arithmetic exactly.
 * Some graphs place FakeQuant nodes at unexpected locations that block
 * transformations needed to generate inference code. Such graphs should be
 * fixed, but as a temporary work-around this flag lets toco perform the
 * necessary transformations anyway, at the cost of no longer faithfully
 * matching inference and training arithmetic.
 *
 * optional bool reorder_across_fake_quant = 8;
 *
 * @return whether the field was explicitly set.
 */
public boolean hasReorderAcrossFakeQuant() {
  return (bitField0_ & 0x00000200) != 0;
}
/**
 * Returns whether graph transformations may reorder across FakeQuant nodes.
 *
 * optional bool reorder_across_fake_quant = 8;
 */
public boolean getReorderAcrossFakeQuant() {
  return reorderAcrossFakeQuant_;
}
public static final int ALLOW_CUSTOM_OPS_FIELD_NUMBER = 10;
private boolean allowCustomOps_;
/**
 * If true, allow TOCO to create TF Lite Custom operators for all the
 * unsupported Tensorflow ops.
 *
 * optional bool allow_custom_ops = 10;
 *
 * @return whether the field was explicitly set.
 */
public boolean hasAllowCustomOps() {
  return (bitField0_ & 0x00000400) != 0;
}
/**
 * Returns whether TF Lite Custom operators are allowed for unsupported ops.
 *
 * optional bool allow_custom_ops = 10;
 */
public boolean getAllowCustomOps() {
  return allowCustomOps_;
}
public static final int DROP_CONTROL_DEPENDENCY_FIELD_NUMBER = 12;
private boolean dropControlDependency_;
/**
 * Applies only when the input format is TENSORFLOW_GRAPHDEF. If true,
 * control dependencies are dropped immediately during import. If unset,
 * defaults to false when the output format is TENSORFLOW_GRAPHDEF and to
 * true in all other cases.
 *
 * optional bool drop_control_dependency = 12;
 *
 * @return whether the field was explicitly set.
 */
public boolean hasDropControlDependency() {
  return (bitField0_ & 0x00000800) != 0;
}
/**
 * Returns whether control dependencies are dropped during import.
 *
 * optional bool drop_control_dependency = 12;
 */
public boolean getDropControlDependency() {
  return dropControlDependency_;
}
public static final int DEBUG_DISABLE_RECURRENT_CELL_FUSION_FIELD_NUMBER = 13;
private boolean debugDisableRecurrentCellFusion_;
/**
 * Disables transformations that fuse subgraphs such as known LSTMs (not all
 * LSTMs are identified).
 *
 * optional bool debug_disable_recurrent_cell_fusion = 13;
 *
 * @return whether the field was explicitly set.
 */
public boolean hasDebugDisableRecurrentCellFusion() {
  return (bitField0_ & 0x00001000) != 0;
}
/**
 * Returns whether recurrent-cell (e.g. LSTM) fusion is disabled.
 *
 * optional bool debug_disable_recurrent_cell_fusion = 13;
 */
public boolean getDebugDisableRecurrentCellFusion() {
  return debugDisableRecurrentCellFusion_;
}
public static final int PROPAGATE_FAKE_QUANT_NUM_BITS_FIELD_NUMBER = 14;
private boolean propagateFakeQuantNumBits_;
/**
 * Uses the FakeQuantWithMinMaxArgs.num_bits attribute to adjust quantized
 * array data types throughout the graph. The graph must be properly
 * annotated with FakeQuant* ops on at least the edges, and may contain
 * additional interior ops to widen/narrow as desired. Input and output
 * array data types may change because of this propagation, so users must
 * query the final data_type values.
 *
 * optional bool propagate_fake_quant_num_bits = 14;
 *
 * @return whether the field was explicitly set.
 */
public boolean hasPropagateFakeQuantNumBits() {
  return (bitField0_ & 0x00002000) != 0;
}
/**
 * Returns whether FakeQuant num_bits propagation is enabled.
 *
 * optional bool propagate_fake_quant_num_bits = 14;
 */
public boolean getPropagateFakeQuantNumBits() {
  return propagateFakeQuantNumBits_;
}
public static final int ALLOW_NUDGING_WEIGHTS_TO_USE_FAST_GEMM_KERNEL_FIELD_NUMBER = 17;
private boolean allowNudgingWeightsToUseFastGemmKernel_;
/**
 * Some fast uint8 GEMM kernels require uint8 weights to avoid the value 0.
 * This flag allows nudging them to 1 to allow proceeding, with moderate
 * inaccuracy.
 *
 * optional bool allow_nudging_weights_to_use_fast_gemm_kernel = 17;
 *
 * @return whether the field was explicitly set.
 */
public boolean hasAllowNudgingWeightsToUseFastGemmKernel() {
  return (bitField0_ & 0x00004000) != 0;
}
/**
 * Returns whether zero-valued uint8 weights may be nudged to 1.
 *
 * optional bool allow_nudging_weights_to_use_fast_gemm_kernel = 17;
 */
public boolean getAllowNudgingWeightsToUseFastGemmKernel() {
  return allowNudgingWeightsToUseFastGemmKernel_;
}
public static final int DEDUPE_ARRAY_MIN_SIZE_BYTES_FIELD_NUMBER = 18;
private long dedupeArrayMinSizeBytes_;
/**
 * Minimum size of constant arrays to deduplicate; arrays smaller will not
 * be deduplicated.
 *
 * optional int64 dedupe_array_min_size_bytes = 18 [default = 64];
 *
 * @return whether the field was explicitly set.
 */
public boolean hasDedupeArrayMinSizeBytes() {
  return (bitField0_ & 0x00008000) != 0;
}
/**
 * Returns the dedupe size threshold in bytes.
 *
 * optional int64 dedupe_array_min_size_bytes = 18 [default = 64];
 */
public long getDedupeArrayMinSizeBytes() {
  return dedupeArrayMinSizeBytes_;
}
public static final int SPLIT_TFLITE_LSTM_INPUTS_FIELD_NUMBER = 19;
private boolean splitTfliteLstmInputs_;
/**
 * Split the LSTM inputs from 5 tensors to 18 tensors for TFLite.
 * Ignored if the output format is not TFLite.
 *
 * optional bool split_tflite_lstm_inputs = 19 [default = true];
 *
 * @return whether the field was explicitly set.
 */
public boolean hasSplitTfliteLstmInputs() {
  return (bitField0_ & 0x00010000) != 0;
}
/**
 * Returns whether LSTM inputs are split into 18 tensors for TFLite.
 *
 * optional bool split_tflite_lstm_inputs = 19 [default = true];
 */
public boolean getSplitTfliteLstmInputs() {
  return splitTfliteLstmInputs_;
}
public static final int QUANTIZE_WEIGHTS_FIELD_NUMBER = 20;
private boolean quantizeWeights_;
/**
 * Store weights as quantized weights followed by dequantize operations.
 * Computation is still done in float, but reduces model size (at the cost
 * of accuracy and latency).
 * DEPRECATED: Please use post_training_quantize instead.
 *
 * optional bool quantize_weights = 20 [default = false];
 *
 * @return whether the field was explicitly set.
 */
public boolean hasQuantizeWeights() {
  return (bitField0_ & 0x00020000) != 0;
}
/**
 * Returns whether weights are stored quantized with dequantize ops.
 * DEPRECATED: Please use post_training_quantize instead.
 *
 * optional bool quantize_weights = 20 [default = false];
 */
public boolean getQuantizeWeights() {
  return quantizeWeights_;
}
public static final int DUMP_GRAPHVIZ_DIR_FIELD_NUMBER = 24;
// Holds either a String or a ByteString; lazily decoded and cached as a
// String on first access (standard generated-message representation).
private volatile java.lang.Object dumpGraphvizDir_;
/**
 * Full filepath of folder to dump the graphs at various stages of
 * processing GraphViz .dot files. Preferred over
 * --output_format=GRAPHVIZ_DOT in order to keep the requirements of the
 * output file.
 *
 * optional string dump_graphviz_dir = 24;
 *
 * @return whether the field was explicitly set.
 */
public boolean hasDumpGraphvizDir() {
  return (bitField0_ & 0x00040000) != 0;
}
/**
 * Returns the dump directory as a String, decoding (and, when the bytes
 * are valid UTF-8, caching) the underlying ByteString on first access.
 *
 * optional string dump_graphviz_dir = 24;
 */
public java.lang.String getDumpGraphvizDir() {
  java.lang.Object ref = dumpGraphvizDir_;
  if (ref instanceof java.lang.String) {
    return (java.lang.String) ref;
  }
  com.google.protobuf.ByteString bytes = (com.google.protobuf.ByteString) ref;
  java.lang.String decoded = bytes.toStringUtf8();
  if (bytes.isValidUtf8()) {
    // Cache only when the decode is lossless.
    dumpGraphvizDir_ = decoded;
  }
  return decoded;
}
/**
 * Returns the dump directory as a ByteString, encoding (and caching) the
 * underlying String on first access.
 *
 * optional string dump_graphviz_dir = 24;
 */
public com.google.protobuf.ByteString
    getDumpGraphvizDirBytes() {
  java.lang.Object ref = dumpGraphvizDir_;
  if (!(ref instanceof java.lang.String)) {
    return (com.google.protobuf.ByteString) ref;
  }
  com.google.protobuf.ByteString encoded =
      com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
  dumpGraphvizDir_ = encoded;
  return encoded;
}
public static final int DUMP_GRAPHVIZ_INCLUDE_VIDEO_FIELD_NUMBER = 25;
private boolean dumpGraphvizIncludeVideo_;
/**
 * Boolean indicating whether to dump the graph after every graph
 * transformation.
 *
 * optional bool dump_graphviz_include_video = 25;
 *
 * @return whether the field was explicitly set.
 */
public boolean hasDumpGraphvizIncludeVideo() {
  return (bitField0_ & 0x00080000) != 0;
}
/**
 * Returns whether the graph is dumped after every transformation.
 *
 * optional bool dump_graphviz_include_video = 25;
 */
public boolean getDumpGraphvizIncludeVideo() {
  return dumpGraphvizIncludeVideo_;
}
public static final int POST_TRAINING_QUANTIZE_FIELD_NUMBER = 26;
private boolean postTrainingQuantize_;
/**
 * Boolean indicating whether to quantize the weights of the converted
 * float model. Model size will be reduced and there will be latency
 * improvements (at the cost of accuracy).
 *
 * optional bool post_training_quantize = 26 [default = false];
 *
 * @return whether the field was explicitly set.
 */
public boolean hasPostTrainingQuantize() {
  return (bitField0_ & 0x00100000) != 0;
}
/**
 * Returns whether post-training weight quantization is enabled.
 *
 * optional bool post_training_quantize = 26 [default = false];
 */
public boolean getPostTrainingQuantize() {
  return postTrainingQuantize_;
}
public static final int ALLOW_FLEX_OPS_FIELD_NUMBER = 27;
private boolean allowFlexOps_;
/**
 * When enabled, unsupported ops will be converted to TFLite Flex ops.
 * TODO(ycling): Consider to rename the following 2 flags and don't call it
 * "Flex".
 * `allow_flex_ops` should always be used with `allow_custom_ops`.
 * WARNING: Experimental interface, subject to change
 *
 * optional bool allow_flex_ops = 27 [default = false];
 *
 * @return whether the field was explicitly set.
 */
public boolean hasAllowFlexOps() {
  return (bitField0_ & 0x00200000) != 0;
}
/**
 * Returns whether unsupported ops may be converted to TFLite Flex ops.
 *
 * optional bool allow_flex_ops = 27 [default = false];
 */
public boolean getAllowFlexOps() {
  return allowFlexOps_;
}
public static final int FORCE_FLEX_OPS_FIELD_NUMBER = 28;
private boolean forceFlexOps_;
/**
 * When enabled, all TensorFlow ops will be converted to TFLite Flex ops
 * directly. This will force `allow_flex_ops` to true.
 * `force_flex_ops` should always be used with `allow_flex_ops`.
 * WARNING: Experimental interface, subject to change
 *
 * optional bool force_flex_ops = 28 [default = false];
 *
 * @return whether the field was explicitly set.
 */
public boolean hasForceFlexOps() {
  return (bitField0_ & 0x00400000) != 0;
}
/**
 * Returns whether all TensorFlow ops are converted directly to Flex ops.
 *
 * optional bool force_flex_ops = 28 [default = false];
 */
public boolean getForceFlexOps() {
  return forceFlexOps_;
}
// -1 = not yet computed, 0 = not initialized, 1 = initialized.
private byte memoizedIsInitialized = -1;
/**
 * Reports whether all required fields are set. This message declares no
 * required fields, so the answer is always true; the result is memoized.
 */
@java.lang.Override
public final boolean isInitialized() {
  byte cached = memoizedIsInitialized;
  if (cached == 1) {
    return true;
  }
  if (cached == 0) {
    return false;
  }
  memoizedIsInitialized = 1;
  return true;
}
/**
 * Serializes every explicitly-set field (per the bitField0_ presence bits)
 * to {@code output} in ascending field-number order
 * (1,2,4,5,6,7,8,10,11,12,13,14,15,16,17,18,19,20,24,25,26,27,28),
 * then appends any unknown fields preserved from parsing. The mask checked
 * for each field matches the one set by the corresponding hasXxx() method.
 */
@java.lang.Override
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeEnum(1, inputFormat_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeEnum(2, outputFormat_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeEnum(4, inferenceType_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
output.writeFloat(5, defaultRangesMin_);
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
output.writeFloat(6, defaultRangesMax_);
}
if (((bitField0_ & 0x00000100) == 0x00000100)) {
output.writeBool(7, dropFakeQuant_);
}
if (((bitField0_ & 0x00000200) == 0x00000200)) {
output.writeBool(8, reorderAcrossFakeQuant_);
}
if (((bitField0_ & 0x00000400) == 0x00000400)) {
output.writeBool(10, allowCustomOps_);
}
// Note: field 11 (inference_input_type) uses presence bit 0x4 even though
// it is written here, after field 10, to keep tag order ascending.
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeEnum(11, inferenceInputType_);
}
if (((bitField0_ & 0x00000800) == 0x00000800)) {
output.writeBool(12, dropControlDependency_);
}
if (((bitField0_ & 0x00001000) == 0x00001000)) {
output.writeBool(13, debugDisableRecurrentCellFusion_);
}
if (((bitField0_ & 0x00002000) == 0x00002000)) {
output.writeBool(14, propagateFakeQuantNumBits_);
}
if (((bitField0_ & 0x00000040) == 0x00000040)) {
output.writeFloat(15, defaultInt16RangesMin_);
}
if (((bitField0_ & 0x00000080) == 0x00000080)) {
output.writeFloat(16, defaultInt16RangesMax_);
}
if (((bitField0_ & 0x00004000) == 0x00004000)) {
output.writeBool(17, allowNudgingWeightsToUseFastGemmKernel_);
}
if (((bitField0_ & 0x00008000) == 0x00008000)) {
output.writeInt64(18, dedupeArrayMinSizeBytes_);
}
if (((bitField0_ & 0x00010000) == 0x00010000)) {
output.writeBool(19, splitTfliteLstmInputs_);
}
if (((bitField0_ & 0x00020000) == 0x00020000)) {
output.writeBool(20, quantizeWeights_);
}
if (((bitField0_ & 0x00040000) == 0x00040000)) {
com.google.protobuf.GeneratedMessageV3.writeString(output, 24, dumpGraphvizDir_);
}
if (((bitField0_ & 0x00080000) == 0x00080000)) {
output.writeBool(25, dumpGraphvizIncludeVideo_);
}
if (((bitField0_ & 0x00100000) == 0x00100000)) {
output.writeBool(26, postTrainingQuantize_);
}
if (((bitField0_ & 0x00200000) == 0x00200000)) {
output.writeBool(27, allowFlexOps_);
}
if (((bitField0_ & 0x00400000) == 0x00400000)) {
output.writeBool(28, forceFlexOps_);
}
unknownFields.writeTo(output);
}
/**
 * Computes the serialized byte size of this message: the sum of the
 * encoded sizes of all explicitly-set fields plus the unknown-field set.
 * The result is memoized in {@code memoizedSize} (-1 means "not yet
 * computed"); the presence masks mirror those used by writeTo.
 */
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeEnumSize(1, inputFormat_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeEnumSize(2, outputFormat_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeEnumSize(4, inferenceType_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
size += com.google.protobuf.CodedOutputStream
.computeFloatSize(5, defaultRangesMin_);
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
size += com.google.protobuf.CodedOutputStream
.computeFloatSize(6, defaultRangesMax_);
}
if (((bitField0_ & 0x00000100) == 0x00000100)) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(7, dropFakeQuant_);
}
if (((bitField0_ & 0x00000200) == 0x00000200)) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(8, reorderAcrossFakeQuant_);
}
if (((bitField0_ & 0x00000400) == 0x00000400)) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(10, allowCustomOps_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeEnumSize(11, inferenceInputType_);
}
if (((bitField0_ & 0x00000800) == 0x00000800)) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(12, dropControlDependency_);
}
if (((bitField0_ & 0x00001000) == 0x00001000)) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(13, debugDisableRecurrentCellFusion_);
}
if (((bitField0_ & 0x00002000) == 0x00002000)) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(14, propagateFakeQuantNumBits_);
}
if (((bitField0_ & 0x00000040) == 0x00000040)) {
size += com.google.protobuf.CodedOutputStream
.computeFloatSize(15, defaultInt16RangesMin_);
}
if (((bitField0_ & 0x00000080) == 0x00000080)) {
size += com.google.protobuf.CodedOutputStream
.computeFloatSize(16, defaultInt16RangesMax_);
}
if (((bitField0_ & 0x00004000) == 0x00004000)) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(17, allowNudgingWeightsToUseFastGemmKernel_);
}
if (((bitField0_ & 0x00008000) == 0x00008000)) {
size += com.google.protobuf.CodedOutputStream
.computeInt64Size(18, dedupeArrayMinSizeBytes_);
}
if (((bitField0_ & 0x00010000) == 0x00010000)) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(19, splitTfliteLstmInputs_);
}
if (((bitField0_ & 0x00020000) == 0x00020000)) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(20, quantizeWeights_);
}
if (((bitField0_ & 0x00040000) == 0x00040000)) {
size += com.google.protobuf.GeneratedMessageV3.computeStringSize(24, dumpGraphvizDir_);
}
if (((bitField0_ & 0x00080000) == 0x00080000)) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(25, dumpGraphvizIncludeVideo_);
}
if (((bitField0_ & 0x00100000) == 0x00100000)) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(26, postTrainingQuantize_);
}
if (((bitField0_ & 0x00200000) == 0x00200000)) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(27, allowFlexOps_);
}
if (((bitField0_ & 0x00400000) == 0x00400000)) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(28, forceFlexOps_);
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
}
/**
 * Two TocoFlags messages are equal iff every field agrees on presence and,
 * when present, on value, and their unknown-field sets are equal. Floats
 * are compared by bit pattern (so NaN equals NaN and -0.0 differs from
 * 0.0), matching protobuf-generated equality semantics.
 */
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
  if (obj == this) {
    return true;
  }
  if (!(obj instanceof toco.TocoFlagsOuterClass.TocoFlags)) {
    return super.equals(obj);
  }
  toco.TocoFlagsOuterClass.TocoFlags other = (toco.TocoFlagsOuterClass.TocoFlags) obj;
  if (hasInputFormat() != other.hasInputFormat()) return false;
  if (hasInputFormat() && inputFormat_ != other.inputFormat_) return false;
  if (hasOutputFormat() != other.hasOutputFormat()) return false;
  if (hasOutputFormat() && outputFormat_ != other.outputFormat_) return false;
  if (hasInferenceInputType() != other.hasInferenceInputType()) return false;
  if (hasInferenceInputType()
      && inferenceInputType_ != other.inferenceInputType_) return false;
  if (hasInferenceType() != other.hasInferenceType()) return false;
  if (hasInferenceType() && inferenceType_ != other.inferenceType_) return false;
  if (hasDefaultRangesMin() != other.hasDefaultRangesMin()) return false;
  if (hasDefaultRangesMin()
      && java.lang.Float.floatToIntBits(getDefaultRangesMin())
          != java.lang.Float.floatToIntBits(other.getDefaultRangesMin())) {
    return false;
  }
  if (hasDefaultRangesMax() != other.hasDefaultRangesMax()) return false;
  if (hasDefaultRangesMax()
      && java.lang.Float.floatToIntBits(getDefaultRangesMax())
          != java.lang.Float.floatToIntBits(other.getDefaultRangesMax())) {
    return false;
  }
  if (hasDefaultInt16RangesMin() != other.hasDefaultInt16RangesMin()) return false;
  if (hasDefaultInt16RangesMin()
      && java.lang.Float.floatToIntBits(getDefaultInt16RangesMin())
          != java.lang.Float.floatToIntBits(other.getDefaultInt16RangesMin())) {
    return false;
  }
  if (hasDefaultInt16RangesMax() != other.hasDefaultInt16RangesMax()) return false;
  if (hasDefaultInt16RangesMax()
      && java.lang.Float.floatToIntBits(getDefaultInt16RangesMax())
          != java.lang.Float.floatToIntBits(other.getDefaultInt16RangesMax())) {
    return false;
  }
  if (hasDropFakeQuant() != other.hasDropFakeQuant()) return false;
  if (hasDropFakeQuant()
      && getDropFakeQuant() != other.getDropFakeQuant()) return false;
  if (hasReorderAcrossFakeQuant() != other.hasReorderAcrossFakeQuant()) return false;
  if (hasReorderAcrossFakeQuant()
      && getReorderAcrossFakeQuant() != other.getReorderAcrossFakeQuant()) return false;
  if (hasAllowCustomOps() != other.hasAllowCustomOps()) return false;
  if (hasAllowCustomOps()
      && getAllowCustomOps() != other.getAllowCustomOps()) return false;
  if (hasDropControlDependency() != other.hasDropControlDependency()) return false;
  if (hasDropControlDependency()
      && getDropControlDependency() != other.getDropControlDependency()) return false;
  if (hasDebugDisableRecurrentCellFusion() != other.hasDebugDisableRecurrentCellFusion()) {
    return false;
  }
  if (hasDebugDisableRecurrentCellFusion()
      && getDebugDisableRecurrentCellFusion()
          != other.getDebugDisableRecurrentCellFusion()) {
    return false;
  }
  if (hasPropagateFakeQuantNumBits() != other.hasPropagateFakeQuantNumBits()) return false;
  if (hasPropagateFakeQuantNumBits()
      && getPropagateFakeQuantNumBits() != other.getPropagateFakeQuantNumBits()) {
    return false;
  }
  if (hasAllowNudgingWeightsToUseFastGemmKernel()
      != other.hasAllowNudgingWeightsToUseFastGemmKernel()) {
    return false;
  }
  if (hasAllowNudgingWeightsToUseFastGemmKernel()
      && getAllowNudgingWeightsToUseFastGemmKernel()
          != other.getAllowNudgingWeightsToUseFastGemmKernel()) {
    return false;
  }
  if (hasDedupeArrayMinSizeBytes() != other.hasDedupeArrayMinSizeBytes()) return false;
  if (hasDedupeArrayMinSizeBytes()
      && getDedupeArrayMinSizeBytes() != other.getDedupeArrayMinSizeBytes()) {
    return false;
  }
  if (hasSplitTfliteLstmInputs() != other.hasSplitTfliteLstmInputs()) return false;
  if (hasSplitTfliteLstmInputs()
      && getSplitTfliteLstmInputs() != other.getSplitTfliteLstmInputs()) return false;
  if (hasQuantizeWeights() != other.hasQuantizeWeights()) return false;
  if (hasQuantizeWeights()
      && getQuantizeWeights() != other.getQuantizeWeights()) return false;
  if (hasDumpGraphvizDir() != other.hasDumpGraphvizDir()) return false;
  if (hasDumpGraphvizDir()
      && !getDumpGraphvizDir().equals(other.getDumpGraphvizDir())) return false;
  if (hasDumpGraphvizIncludeVideo() != other.hasDumpGraphvizIncludeVideo()) return false;
  if (hasDumpGraphvizIncludeVideo()
      && getDumpGraphvizIncludeVideo() != other.getDumpGraphvizIncludeVideo()) {
    return false;
  }
  if (hasPostTrainingQuantize() != other.hasPostTrainingQuantize()) return false;
  if (hasPostTrainingQuantize()
      && getPostTrainingQuantize() != other.getPostTrainingQuantize()) return false;
  if (hasAllowFlexOps() != other.hasAllowFlexOps()) return false;
  if (hasAllowFlexOps()
      && getAllowFlexOps() != other.getAllowFlexOps()) return false;
  if (hasForceFlexOps() != other.hasForceFlexOps()) return false;
  if (hasForceFlexOps()
      && getForceFlexOps() != other.getForceFlexOps()) return false;
  return unknownFields.equals(other.unknownFields);
}
@java.lang.Override
public int hashCode() {
// Generated-code hashCode, consistent with equals(): only fields whose
// has-bit is set contribute. Each set field is mixed in as
// (37*h + fieldNumber) then (53*h + valueHash). The result is memoized,
// which is safe because message instances are immutable.
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
// Enum fields hash by their raw wire number.
if (hasInputFormat()) {
hash = (37 * hash) + INPUT_FORMAT_FIELD_NUMBER;
hash = (53 * hash) + inputFormat_;
}
if (hasOutputFormat()) {
hash = (37 * hash) + OUTPUT_FORMAT_FIELD_NUMBER;
hash = (53 * hash) + outputFormat_;
}
if (hasInferenceInputType()) {
hash = (37 * hash) + INFERENCE_INPUT_TYPE_FIELD_NUMBER;
hash = (53 * hash) + inferenceInputType_;
}
if (hasInferenceType()) {
hash = (37 * hash) + INFERENCE_TYPE_FIELD_NUMBER;
hash = (53 * hash) + inferenceType_;
}
// Float fields hash by bit pattern so the hash matches float equality
// as used by equals().
if (hasDefaultRangesMin()) {
hash = (37 * hash) + DEFAULT_RANGES_MIN_FIELD_NUMBER;
hash = (53 * hash) + java.lang.Float.floatToIntBits(
getDefaultRangesMin());
}
if (hasDefaultRangesMax()) {
hash = (37 * hash) + DEFAULT_RANGES_MAX_FIELD_NUMBER;
hash = (53 * hash) + java.lang.Float.floatToIntBits(
getDefaultRangesMax());
}
if (hasDefaultInt16RangesMin()) {
hash = (37 * hash) + DEFAULT_INT16_RANGES_MIN_FIELD_NUMBER;
hash = (53 * hash) + java.lang.Float.floatToIntBits(
getDefaultInt16RangesMin());
}
if (hasDefaultInt16RangesMax()) {
hash = (37 * hash) + DEFAULT_INT16_RANGES_MAX_FIELD_NUMBER;
hash = (53 * hash) + java.lang.Float.floatToIntBits(
getDefaultInt16RangesMax());
}
// Boolean fields hash via Internal.hashBoolean.
if (hasDropFakeQuant()) {
hash = (37 * hash) + DROP_FAKE_QUANT_FIELD_NUMBER;
hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(
getDropFakeQuant());
}
if (hasReorderAcrossFakeQuant()) {
hash = (37 * hash) + REORDER_ACROSS_FAKE_QUANT_FIELD_NUMBER;
hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(
getReorderAcrossFakeQuant());
}
if (hasAllowCustomOps()) {
hash = (37 * hash) + ALLOW_CUSTOM_OPS_FIELD_NUMBER;
hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(
getAllowCustomOps());
}
if (hasDropControlDependency()) {
hash = (37 * hash) + DROP_CONTROL_DEPENDENCY_FIELD_NUMBER;
hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(
getDropControlDependency());
}
if (hasDebugDisableRecurrentCellFusion()) {
hash = (37 * hash) + DEBUG_DISABLE_RECURRENT_CELL_FUSION_FIELD_NUMBER;
hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(
getDebugDisableRecurrentCellFusion());
}
if (hasPropagateFakeQuantNumBits()) {
hash = (37 * hash) + PROPAGATE_FAKE_QUANT_NUM_BITS_FIELD_NUMBER;
hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(
getPropagateFakeQuantNumBits());
}
if (hasAllowNudgingWeightsToUseFastGemmKernel()) {
hash = (37 * hash) + ALLOW_NUDGING_WEIGHTS_TO_USE_FAST_GEMM_KERNEL_FIELD_NUMBER;
hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(
getAllowNudgingWeightsToUseFastGemmKernel());
}
// int64 field hashes via Internal.hashLong (folds both 32-bit halves).
if (hasDedupeArrayMinSizeBytes()) {
hash = (37 * hash) + DEDUPE_ARRAY_MIN_SIZE_BYTES_FIELD_NUMBER;
hash = (53 * hash) + com.google.protobuf.Internal.hashLong(
getDedupeArrayMinSizeBytes());
}
if (hasSplitTfliteLstmInputs()) {
hash = (37 * hash) + SPLIT_TFLITE_LSTM_INPUTS_FIELD_NUMBER;
hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(
getSplitTfliteLstmInputs());
}
if (hasQuantizeWeights()) {
hash = (37 * hash) + QUANTIZE_WEIGHTS_FIELD_NUMBER;
hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(
getQuantizeWeights());
}
// String field hashes via String.hashCode().
if (hasDumpGraphvizDir()) {
hash = (37 * hash) + DUMP_GRAPHVIZ_DIR_FIELD_NUMBER;
hash = (53 * hash) + getDumpGraphvizDir().hashCode();
}
if (hasDumpGraphvizIncludeVideo()) {
hash = (37 * hash) + DUMP_GRAPHVIZ_INCLUDE_VIDEO_FIELD_NUMBER;
hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(
getDumpGraphvizIncludeVideo());
}
if (hasPostTrainingQuantize()) {
hash = (37 * hash) + POST_TRAINING_QUANTIZE_FIELD_NUMBER;
hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(
getPostTrainingQuantize());
}
if (hasAllowFlexOps()) {
hash = (37 * hash) + ALLOW_FLEX_OPS_FIELD_NUMBER;
hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(
getAllowFlexOps());
}
if (hasForceFlexOps()) {
hash = (37 * hash) + FORCE_FLEX_OPS_FIELD_NUMBER;
hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(
getForceFlexOps());
}
// Unknown (unrecognized wire) fields also participate, then memoize.
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
}
// ---- Static parsers -----------------------------------------------------
// Standard generated overload set. The in-memory variants (ByteBuffer,
// ByteString, byte[]) delegate straight to PARSER and throw
// InvalidProtocolBufferException on malformed input; the stream variants go
// through GeneratedMessageV3's IO helpers and surface IOException. The
// *DelimitedFrom variants expect a varint length prefix before the message
// (as written by writeDelimitedTo). Overloads taking an
// ExtensionRegistryLite resolve extensions during parsing.
public static toco.TocoFlagsOuterClass.TocoFlags parseFrom(
java.nio.ByteBuffer data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static toco.TocoFlagsOuterClass.TocoFlags parseFrom(
java.nio.ByteBuffer data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static toco.TocoFlagsOuterClass.TocoFlags parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static toco.TocoFlagsOuterClass.TocoFlags parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static toco.TocoFlagsOuterClass.TocoFlags parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static toco.TocoFlagsOuterClass.TocoFlags parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
// Stream-based parsing: reads to EOF of the stream (no length prefix).
public static toco.TocoFlagsOuterClass.TocoFlags parseFrom(java.io.InputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static toco.TocoFlagsOuterClass.TocoFlags parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
// Length-delimited parsing: reads one varint-prefixed message.
public static toco.TocoFlagsOuterClass.TocoFlags parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input);
}
public static toco.TocoFlagsOuterClass.TocoFlags parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseDelimitedWithIOException(PARSER, input, extensionRegistry);
}
// CodedInputStream variants: parse from an already-wrapped input.
public static toco.TocoFlagsOuterClass.TocoFlags parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input);
}
public static toco.TocoFlagsOuterClass.TocoFlags parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3
.parseWithIOException(PARSER, input, extensionRegistry);
}
// ---- Builder factories --------------------------------------------------
@java.lang.Override
public Builder newBuilderForType() { return newBuilder(); }
// Fresh builder initialized from the default instance.
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
// Fresh builder pre-populated with `prototype`'s set fields.
public static Builder newBuilder(toco.TocoFlagsOuterClass.TocoFlags prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
// Skip the mergeFrom pass when this *is* the default instance -- there
// is nothing to copy.
return this == DEFAULT_INSTANCE
? new Builder() : new Builder().mergeFrom(this);
}
// Internal factory used by parent builders for nested-message fields.
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
*
* TocoFlags encodes extra parameters that drive tooling operations, that
* are not normally encoded in model files and in general may not be thought
* of as properties of models, instead describing how models are to be
* processed in the context of the present tooling job.
* Next ID to use: 29.
*
*
* Protobuf type {@code toco.TocoFlags}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessageV3.Builder implements
// @@protoc_insertion_point(builder_implements:toco.TocoFlags)
toco.TocoFlagsOuterClass.TocoFlagsOrBuilder {
// Descriptor for the toco.TocoFlags message type, shared with the
// enclosing message class.
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return toco.TocoFlagsOuterClass.internal_static_toco_TocoFlags_descriptor;
}
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
// Binds (once) the reflective field accessors to the generated message
// and builder classes; used by the reflection-based Message API.
return toco.TocoFlagsOuterClass.internal_static_toco_TocoFlags_fieldAccessorTable
.ensureFieldAccessorsInitialized(
toco.TocoFlagsOuterClass.TocoFlags.class, toco.TocoFlagsOuterClass.TocoFlags.Builder.class);
}
// Construct using toco.TocoFlagsOuterClass.TocoFlags.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
// Parent-aware constructor: mutations made through this builder notify
// the owning parent builder via onChanged().
private Builder(
com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
// TocoFlags has no message-typed fields, so there are no nested field
// builders to eagerly initialize; the guard is emitted for parity with
// other generated builders and is intentionally empty.
if (com.google.protobuf.GeneratedMessageV3
.alwaysUseFieldBuilders) {
}
}
@java.lang.Override
public Builder clear() {
// Resets every field to its proto-declared default and clears all
// has-bits in bitField0_. Most defaults are zero/false/"", but note the
// non-trivial proto defaults: dedupe_array_min_size_bytes = 64 and
// split_tflite_lstm_inputs = true.
super.clear();
inputFormat_ = 0;
bitField0_ = (bitField0_ & ~0x00000001);
outputFormat_ = 0;
bitField0_ = (bitField0_ & ~0x00000002);
inferenceInputType_ = 0;
bitField0_ = (bitField0_ & ~0x00000004);
inferenceType_ = 0;
bitField0_ = (bitField0_ & ~0x00000008);
defaultRangesMin_ = 0F;
bitField0_ = (bitField0_ & ~0x00000010);
defaultRangesMax_ = 0F;
bitField0_ = (bitField0_ & ~0x00000020);
defaultInt16RangesMin_ = 0F;
bitField0_ = (bitField0_ & ~0x00000040);
defaultInt16RangesMax_ = 0F;
bitField0_ = (bitField0_ & ~0x00000080);
dropFakeQuant_ = false;
bitField0_ = (bitField0_ & ~0x00000100);
reorderAcrossFakeQuant_ = false;
bitField0_ = (bitField0_ & ~0x00000200);
allowCustomOps_ = false;
bitField0_ = (bitField0_ & ~0x00000400);
dropControlDependency_ = false;
bitField0_ = (bitField0_ & ~0x00000800);
debugDisableRecurrentCellFusion_ = false;
bitField0_ = (bitField0_ & ~0x00001000);
propagateFakeQuantNumBits_ = false;
bitField0_ = (bitField0_ & ~0x00002000);
allowNudgingWeightsToUseFastGemmKernel_ = false;
bitField0_ = (bitField0_ & ~0x00004000);
dedupeArrayMinSizeBytes_ = 64L;
bitField0_ = (bitField0_ & ~0x00008000);
splitTfliteLstmInputs_ = true;
bitField0_ = (bitField0_ & ~0x00010000);
quantizeWeights_ = false;
bitField0_ = (bitField0_ & ~0x00020000);
dumpGraphvizDir_ = "";
bitField0_ = (bitField0_ & ~0x00040000);
dumpGraphvizIncludeVideo_ = false;
bitField0_ = (bitField0_ & ~0x00080000);
postTrainingQuantize_ = false;
bitField0_ = (bitField0_ & ~0x00100000);
allowFlexOps_ = false;
bitField0_ = (bitField0_ & ~0x00200000);
forceFlexOps_ = false;
bitField0_ = (bitField0_ & ~0x00400000);
return this;
}
@java.lang.Override
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return toco.TocoFlagsOuterClass.internal_static_toco_TocoFlags_descriptor;
}
@java.lang.Override
public toco.TocoFlagsOuterClass.TocoFlags getDefaultInstanceForType() {
return toco.TocoFlagsOuterClass.TocoFlags.getDefaultInstance();
}
@java.lang.Override
public toco.TocoFlagsOuterClass.TocoFlags build() {
// All TocoFlags fields are optional, so isInitialized() is always true
// for this message type and the throw below is effectively unreachable;
// the check is kept for the generic Message.Builder contract.
toco.TocoFlagsOuterClass.TocoFlags result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public toco.TocoFlagsOuterClass.TocoFlags buildPartial() {
// Copies every field value into the new message unconditionally, but
// carries a field's has-bit across only when it is set in the builder.
// An unset field therefore keeps the builder's (default) value while
// reporting hasX() == false on the built message.
toco.TocoFlagsOuterClass.TocoFlags result = new toco.TocoFlagsOuterClass.TocoFlags(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.inputFormat_ = inputFormat_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.outputFormat_ = outputFormat_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.inferenceInputType_ = inferenceInputType_;
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000008;
}
result.inferenceType_ = inferenceType_;
if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
to_bitField0_ |= 0x00000010;
}
result.defaultRangesMin_ = defaultRangesMin_;
if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
to_bitField0_ |= 0x00000020;
}
result.defaultRangesMax_ = defaultRangesMax_;
if (((from_bitField0_ & 0x00000040) == 0x00000040)) {
to_bitField0_ |= 0x00000040;
}
result.defaultInt16RangesMin_ = defaultInt16RangesMin_;
if (((from_bitField0_ & 0x00000080) == 0x00000080)) {
to_bitField0_ |= 0x00000080;
}
result.defaultInt16RangesMax_ = defaultInt16RangesMax_;
if (((from_bitField0_ & 0x00000100) == 0x00000100)) {
to_bitField0_ |= 0x00000100;
}
result.dropFakeQuant_ = dropFakeQuant_;
if (((from_bitField0_ & 0x00000200) == 0x00000200)) {
to_bitField0_ |= 0x00000200;
}
result.reorderAcrossFakeQuant_ = reorderAcrossFakeQuant_;
if (((from_bitField0_ & 0x00000400) == 0x00000400)) {
to_bitField0_ |= 0x00000400;
}
result.allowCustomOps_ = allowCustomOps_;
if (((from_bitField0_ & 0x00000800) == 0x00000800)) {
to_bitField0_ |= 0x00000800;
}
result.dropControlDependency_ = dropControlDependency_;
if (((from_bitField0_ & 0x00001000) == 0x00001000)) {
to_bitField0_ |= 0x00001000;
}
result.debugDisableRecurrentCellFusion_ = debugDisableRecurrentCellFusion_;
if (((from_bitField0_ & 0x00002000) == 0x00002000)) {
to_bitField0_ |= 0x00002000;
}
result.propagateFakeQuantNumBits_ = propagateFakeQuantNumBits_;
if (((from_bitField0_ & 0x00004000) == 0x00004000)) {
to_bitField0_ |= 0x00004000;
}
result.allowNudgingWeightsToUseFastGemmKernel_ = allowNudgingWeightsToUseFastGemmKernel_;
if (((from_bitField0_ & 0x00008000) == 0x00008000)) {
to_bitField0_ |= 0x00008000;
}
result.dedupeArrayMinSizeBytes_ = dedupeArrayMinSizeBytes_;
if (((from_bitField0_ & 0x00010000) == 0x00010000)) {
to_bitField0_ |= 0x00010000;
}
result.splitTfliteLstmInputs_ = splitTfliteLstmInputs_;
if (((from_bitField0_ & 0x00020000) == 0x00020000)) {
to_bitField0_ |= 0x00020000;
}
result.quantizeWeights_ = quantizeWeights_;
if (((from_bitField0_ & 0x00040000) == 0x00040000)) {
to_bitField0_ |= 0x00040000;
}
result.dumpGraphvizDir_ = dumpGraphvizDir_;
if (((from_bitField0_ & 0x00080000) == 0x00080000)) {
to_bitField0_ |= 0x00080000;
}
result.dumpGraphvizIncludeVideo_ = dumpGraphvizIncludeVideo_;
if (((from_bitField0_ & 0x00100000) == 0x00100000)) {
to_bitField0_ |= 0x00100000;
}
result.postTrainingQuantize_ = postTrainingQuantize_;
if (((from_bitField0_ & 0x00200000) == 0x00200000)) {
to_bitField0_ |= 0x00200000;
}
result.allowFlexOps_ = allowFlexOps_;
if (((from_bitField0_ & 0x00400000) == 0x00400000)) {
to_bitField0_ |= 0x00400000;
}
result.forceFlexOps_ = forceFlexOps_;
// Publish the accumulated has-bits and notify the parent (if any).
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
// The overrides below only narrow the return type of the inherited
// GeneratedMessageV3.Builder operations from Message.Builder to this
// Builder so that calls can be chained fluently; all behavior is in super.
@java.lang.Override
public Builder clone() {
return (Builder) super.clone();
}
@java.lang.Override
public Builder setField(
com.google.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return (Builder) super.setField(field, value);
}
@java.lang.Override
public Builder clearField(
com.google.protobuf.Descriptors.FieldDescriptor field) {
return (Builder) super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(
com.google.protobuf.Descriptors.OneofDescriptor oneof) {
return (Builder) super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
com.google.protobuf.Descriptors.FieldDescriptor field,
int index, java.lang.Object value) {
return (Builder) super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
com.google.protobuf.Descriptors.FieldDescriptor field,
java.lang.Object value) {
return (Builder) super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(com.google.protobuf.Message other) {
  // Prefer the type-specific merge when `other` is a TocoFlags; any other
  // message type falls back to the reflective field-by-field merge that
  // the base builder provides.
  if (!(other instanceof toco.TocoFlagsOuterClass.TocoFlags)) {
    super.mergeFrom(other);
    return this;
  }
  return mergeFrom((toco.TocoFlagsOuterClass.TocoFlags) other);
}
// Field-by-field merge: every field that is set in `other` overwrites
// this builder's value; fields unset in `other` are left untouched.
// Merging the default instance is a no-op (early return).
public Builder mergeFrom(toco.TocoFlagsOuterClass.TocoFlags other) {
if (other == toco.TocoFlagsOuterClass.TocoFlags.getDefaultInstance()) return this;
if (other.hasInputFormat()) {
setInputFormat(other.getInputFormat());
}
if (other.hasOutputFormat()) {
setOutputFormat(other.getOutputFormat());
}
if (other.hasInferenceInputType()) {
setInferenceInputType(other.getInferenceInputType());
}
if (other.hasInferenceType()) {
setInferenceType(other.getInferenceType());
}
if (other.hasDefaultRangesMin()) {
setDefaultRangesMin(other.getDefaultRangesMin());
}
if (other.hasDefaultRangesMax()) {
setDefaultRangesMax(other.getDefaultRangesMax());
}
if (other.hasDefaultInt16RangesMin()) {
setDefaultInt16RangesMin(other.getDefaultInt16RangesMin());
}
if (other.hasDefaultInt16RangesMax()) {
setDefaultInt16RangesMax(other.getDefaultInt16RangesMax());
}
if (other.hasDropFakeQuant()) {
setDropFakeQuant(other.getDropFakeQuant());
}
if (other.hasReorderAcrossFakeQuant()) {
setReorderAcrossFakeQuant(other.getReorderAcrossFakeQuant());
}
if (other.hasAllowCustomOps()) {
setAllowCustomOps(other.getAllowCustomOps());
}
if (other.hasDropControlDependency()) {
setDropControlDependency(other.getDropControlDependency());
}
if (other.hasDebugDisableRecurrentCellFusion()) {
setDebugDisableRecurrentCellFusion(other.getDebugDisableRecurrentCellFusion());
}
if (other.hasPropagateFakeQuantNumBits()) {
setPropagateFakeQuantNumBits(other.getPropagateFakeQuantNumBits());
}
if (other.hasAllowNudgingWeightsToUseFastGemmKernel()) {
setAllowNudgingWeightsToUseFastGemmKernel(other.getAllowNudgingWeightsToUseFastGemmKernel());
}
if (other.hasDedupeArrayMinSizeBytes()) {
setDedupeArrayMinSizeBytes(other.getDedupeArrayMinSizeBytes());
}
if (other.hasSplitTfliteLstmInputs()) {
setSplitTfliteLstmInputs(other.getSplitTfliteLstmInputs());
}
if (other.hasQuantizeWeights()) {
setQuantizeWeights(other.getQuantizeWeights());
}
if (other.hasDumpGraphvizDir()) {
// String field is assigned directly (not via a setter) so the builder
// shares `other`'s immutable string reference; the has-bit is raised
// here and a single onChanged() fires below.
bitField0_ |= 0x00040000;
dumpGraphvizDir_ = other.dumpGraphvizDir_;
onChanged();
}
if (other.hasDumpGraphvizIncludeVideo()) {
setDumpGraphvizIncludeVideo(other.getDumpGraphvizIncludeVideo());
}
if (other.hasPostTrainingQuantize()) {
setPostTrainingQuantize(other.getPostTrainingQuantize());
}
if (other.hasAllowFlexOps()) {
setAllowFlexOps(other.getAllowFlexOps());
}
if (other.hasForceFlexOps()) {
setForceFlexOps(other.getForceFlexOps());
}
// Unknown fields from `other` are appended to ours.
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
// TocoFlags declares no required fields, so every instance is valid.
return true;
}
@java.lang.Override
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
// Parses one message from the stream and merges it into this builder.
toco.TocoFlagsOuterClass.TocoFlags parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
// On malformed input, keep whatever was parsed before the failure:
// the partial message is merged in the finally block, then the error
// is rethrown as the IOException it wraps.
parsedMessage = (toco.TocoFlagsOuterClass.TocoFlags) e.getUnfinishedMessage();
throw e.unwrapIOException();
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
// Has-bits for the builder's optional fields; one bit per field in
// declaration order (0x1 = input_format, 0x2 = output_format, ...).
private int bitField0_;
// Raw wire number of the input_format enum (toco.FileFormat).
private int inputFormat_ = 0;
/**
 * Input file format.
 *
 * optional .toco.FileFormat input_format = 1;
 */
public boolean hasInputFormat() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
 * Input file format. An unrecognized stored wire value maps to
 * FILE_FORMAT_UNKNOWN rather than returning null.
 *
 * optional .toco.FileFormat input_format = 1;
 */
public toco.TocoFlagsOuterClass.FileFormat getInputFormat() {
@SuppressWarnings("deprecation")
toco.TocoFlagsOuterClass.FileFormat result = toco.TocoFlagsOuterClass.FileFormat.valueOf(inputFormat_);
return result == null ? toco.TocoFlagsOuterClass.FileFormat.FILE_FORMAT_UNKNOWN : result;
}
/**
 * Sets the input file format; rejects null.
 *
 * optional .toco.FileFormat input_format = 1;
 */
public Builder setInputFormat(toco.TocoFlagsOuterClass.FileFormat value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
inputFormat_ = value.getNumber();
onChanged();
return this;
}
/**
 * Clears input_format to its default and unsets its has-bit.
 *
 * optional .toco.FileFormat input_format = 1;
 */
public Builder clearInputFormat() {
bitField0_ = (bitField0_ & ~0x00000001);
inputFormat_ = 0;
onChanged();
return this;
}
// Raw wire number of the output_format enum (toco.FileFormat).
private int outputFormat_ = 0;
/**
 * Output file format.
 *
 * optional .toco.FileFormat output_format = 2;
 */
public boolean hasOutputFormat() {
  return (bitField0_ & 0x00000002) != 0;
}
/**
 * Output file format. An unrecognized stored wire value maps to
 * FILE_FORMAT_UNKNOWN rather than returning null.
 *
 * optional .toco.FileFormat output_format = 2;
 */
public toco.TocoFlagsOuterClass.FileFormat getOutputFormat() {
  @SuppressWarnings("deprecation")
  toco.TocoFlagsOuterClass.FileFormat mapped =
      toco.TocoFlagsOuterClass.FileFormat.valueOf(outputFormat_);
  if (mapped == null) {
    return toco.TocoFlagsOuterClass.FileFormat.FILE_FORMAT_UNKNOWN;
  }
  return mapped;
}
/**
 * Sets the output file format; rejects null.
 *
 * optional .toco.FileFormat output_format = 2;
 */
public Builder setOutputFormat(toco.TocoFlagsOuterClass.FileFormat value) {
  if (value == null) {
    throw new NullPointerException();
  }
  outputFormat_ = value.getNumber();
  bitField0_ |= 0x00000002;
  onChanged();
  return this;
}
/**
 * Clears output_format to its default and unsets its has-bit.
 *
 * optional .toco.FileFormat output_format = 2;
 */
public Builder clearOutputFormat() {
  outputFormat_ = 0;
  bitField0_ &= ~0x00000002;
  onChanged();
  return this;
}
// Raw wire number of inference_input_type (toco.IODataType).
private int inferenceInputType_ = 0;
/**
 * Similar to inference_type, but controls specifically the quantization of
 * input arrays, separately from other arrays. If not set, the value of
 * inference_type is implicitly used, i.e. by default input arrays are
 * quantized like other arrays. Like inference_type, this only affects
 * real-number arrays (float and quantized arrays); plain integer, string
 * and every other data type are excluded. The typical use is vision models
 * taking a bitmap with uint8 channels as input while still requiring
 * floating-point inference: the uint8 input is quantized, with the arrays'
 * mean_value / std_value used as quantization parameters.
 *
 * optional .toco.IODataType inference_input_type = 11;
 */
public boolean hasInferenceInputType() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
 * Quantization type for input arrays; see {@link #hasInferenceInputType()}
 * for full field semantics. Unrecognized stored wire values map to
 * IO_DATA_TYPE_UNKNOWN rather than returning null.
 *
 * optional .toco.IODataType inference_input_type = 11;
 */
public toco.Types.IODataType getInferenceInputType() {
@SuppressWarnings("deprecation")
toco.Types.IODataType result = toco.Types.IODataType.valueOf(inferenceInputType_);
return result == null ? toco.Types.IODataType.IO_DATA_TYPE_UNKNOWN : result;
}
/**
 * Sets the quantization type for input arrays; rejects null.
 *
 * optional .toco.IODataType inference_input_type = 11;
 */
public Builder setInferenceInputType(toco.Types.IODataType value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000004;
inferenceInputType_ = value.getNumber();
onChanged();
return this;
}
/**
 * Clears inference_input_type to its default and unsets its has-bit
 * (inference_type then applies to input arrays again).
 *
 * optional .toco.IODataType inference_input_type = 11;
 */
public Builder clearInferenceInputType() {
bitField0_ = (bitField0_ & ~0x00000004);
inferenceInputType_ = 0;
onChanged();
return this;
}
// Raw wire number of inference_type (toco.IODataType).
private int inferenceType_ = 0;
/**
 * Sets the type of real-number arrays in the output file, that is, controls
 * the representation (quantization) of real numbers in the output file,
 * except for input arrays, which are controlled by inference_input_type.
 * NOTE: this only impacts real-number arrays (float and quantized arrays);
 * plain integer, string and every other data type are excluded. For
 * real-number arrays this allows the output file to use a different
 * real-number representation (quantization) than the input file.
 * Specifically:
 * - FLOAT: real-number arrays are float in the output file; quantized
 *   inputs get dequantized.
 * - QUANTIZED_UINT8: real-number arrays are quantized as uint8 in the
 *   output file; float inputs get quantized.
 * - unset: all real-number arrays keep the same type they have in the
 *   input file.
 *
 * optional .toco.IODataType inference_type = 4;
 */
public boolean hasInferenceType() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
 * Output-file representation of real-number arrays; see
 * {@link #hasInferenceType()} for full field semantics. Unrecognized
 * stored wire values map to IO_DATA_TYPE_UNKNOWN rather than null.
 *
 * optional .toco.IODataType inference_type = 4;
 */
public toco.Types.IODataType getInferenceType() {
@SuppressWarnings("deprecation")
toco.Types.IODataType result = toco.Types.IODataType.valueOf(inferenceType_);
return result == null ? toco.Types.IODataType.IO_DATA_TYPE_UNKNOWN : result;
}
/**
 * Sets the output-file representation of real-number arrays; rejects null.
 *
 * optional .toco.IODataType inference_type = 4;
 */
public Builder setInferenceType(toco.Types.IODataType value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000008;
inferenceType_ = value.getNumber();
onChanged();
return this;
}
/**
 * Clears inference_type to its default and unsets its has-bit (arrays
 * then keep their input-file representation).
 *
 * optional .toco.IODataType inference_type = 4;
 */
public Builder clearInferenceType() {
bitField0_ = (bitField0_ & ~0x00000008);
inferenceType_ = 0;
onChanged();
return this;
}
// default_ranges_min (float, field 5); see the javadoc below.
private float defaultRangesMin_ ;
/**
 * default_ranges_min and default_ranges_max are helpers to experiment with
 * quantization of models. Normally, quantization requires the input model
 * to have (min, max) range information for every activations array, in
 * order to know how to quantize arrays and still achieve satisfactory
 * accuracy. When one just wants to estimate the performance of quantized
 * inference without caring about accuracy, these defaults are used as
 * (min, max) range boundaries for all activation arrays that lack range
 * information, allowing quantization to proceed. These parameters are for
 * experimentation only and should not be used in production: the resulting
 * quantized model will be inaccurate. They only apply to arrays quantized
 * with the kUint8 data type.
 *
 * optional float default_ranges_min = 5;
 */
public boolean hasDefaultRangesMin() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
 * Default minimum of the experimental quantization range; see
 * {@link #hasDefaultRangesMin()} for full field semantics.
 *
 * optional float default_ranges_min = 5;
 */
public float getDefaultRangesMin() {
return defaultRangesMin_;
}
/**
*
* default_ranges_min and default_ranges_max are helpers to experiment
* with quantization of models. Normally, quantization requires the input
* model to have (min, max) range information for every activations array.
* This is needed in order to know how to quantize arrays and still achieve
* satisfactory accuracy. However, in some circumstances one would just like
* to estimate the performance of quantized inference, without caring about
* accuracy. That is what default_ranges_min and default_ranges_max are for:
* when specified, they will be used as default (min, max) range boundaries
* for all activation arrays that lack (min, max) range information, thus
* allowing for quantization to proceed.
* It should be clear from the above explanation that these parameters are
* for experimentation purposes only and should not be used in production:
* they make it easy to quantize models, but the resulting quantized model
* will be inaccurate.
* These values only apply to arrays quantized with the kUint8 data type.
*
*
* optional float default_ranges_min = 5;
*/
public Builder setDefaultRangesMin(float value) {
bitField0_ |= 0x00000010;
defaultRangesMin_ = value;
onChanged();
return this;
}
/**
*
* default_ranges_min and default_ranges_max are helpers to experiment
* with quantization of models. Normally, quantization requires the input
* model to have (min, max) range information for every activations array.
* This is needed in order to know how to quantize arrays and still achieve
* satisfactory accuracy. However, in some circumstances one would just like
* to estimate the performance of quantized inference, without caring about
* accuracy. That is what default_ranges_min and default_ranges_max are for:
* when specified, they will be used as default (min, max) range boundaries
* for all activation arrays that lack (min, max) range information, thus
* allowing for quantization to proceed.
* It should be clear from the above explanation that these parameters are
* for experimentation purposes only and should not be used in production:
* they make it easy to quantize models, but the resulting quantized model
* will be inaccurate.
* These values only apply to arrays quantized with the kUint8 data type.
*
*
* optional float default_ranges_min = 5;
*/
public Builder clearDefaultRangesMin() {
bitField0_ = (bitField0_ & ~0x00000010);
defaultRangesMin_ = 0F;
onChanged();
return this;
}
// Companion (max) boundary to default_ranges_min; see that field for caveats.
private float defaultRangesMax_ ;
/**
 * Whether {@code default_ranges_max} (field 6) has been explicitly set.
 */
public boolean hasDefaultRangesMax() {
  return (bitField0_ & 0x00000020) != 0;
}
/**
 * Returns {@code default_ranges_max} (field 6).
 */
public float getDefaultRangesMax() {
  return defaultRangesMax_;
}
/**
 * Sets {@code default_ranges_max} (field 6).
 */
public Builder setDefaultRangesMax(float v) {
  defaultRangesMax_ = v;
  bitField0_ |= 0x00000020;
  onChanged();
  return this;
}
/**
 * Clears {@code default_ranges_max} (field 6) back to its default (0).
 */
public Builder clearDefaultRangesMax() {
  defaultRangesMax_ = 0F;
  bitField0_ = (bitField0_ & ~0x00000020);
  onChanged();
  return this;
}
// kInt16 counterpart of default_ranges_min.
private float defaultInt16RangesMin_ ;
/**
 * Whether {@code default_int16_ranges_min} (field 15) has been explicitly set.
 */
public boolean hasDefaultInt16RangesMin() {
  return (bitField0_ & 0x00000040) != 0;
}
/**
 * Returns {@code default_int16_ranges_min} (field 15): the equivalent of
 * {@code default_ranges_min} for arrays quantized with the kInt16 data type.
 */
public float getDefaultInt16RangesMin() {
  return defaultInt16RangesMin_;
}
/**
 * Sets {@code default_int16_ranges_min} (field 15).
 */
public Builder setDefaultInt16RangesMin(float v) {
  defaultInt16RangesMin_ = v;
  bitField0_ |= 0x00000040;
  onChanged();
  return this;
}
/**
 * Clears {@code default_int16_ranges_min} (field 15) back to its default (0).
 */
public Builder clearDefaultInt16RangesMin() {
  defaultInt16RangesMin_ = 0F;
  bitField0_ = (bitField0_ & ~0x00000040);
  onChanged();
  return this;
}
// kInt16 counterpart of default_ranges_max.
private float defaultInt16RangesMax_ ;
/**
 * Whether {@code default_int16_ranges_max} (field 16) has been explicitly set.
 */
public boolean hasDefaultInt16RangesMax() {
  return (bitField0_ & 0x00000080) != 0;
}
/**
 * Returns {@code default_int16_ranges_max} (field 16).
 */
public float getDefaultInt16RangesMax() {
  return defaultInt16RangesMax_;
}
/**
 * Sets {@code default_int16_ranges_max} (field 16).
 */
public Builder setDefaultInt16RangesMax(float v) {
  defaultInt16RangesMax_ = v;
  bitField0_ |= 0x00000080;
  onChanged();
  return this;
}
/**
 * Clears {@code default_int16_ranges_max} (field 16) back to its default (0).
 */
public Builder clearDefaultInt16RangesMax() {
  defaultInt16RangesMax_ = 0F;
  bitField0_ = (bitField0_ & ~0x00000080);
  onChanged();
  return this;
}
// When true, FakeQuant nodes are ignored and discarded, e.g. to produce plain
// float code from a quantized graph.
private boolean dropFakeQuant_ ;
/**
 * Whether {@code drop_fake_quant} (field 7) has been explicitly set.
 */
public boolean hasDropFakeQuant() {
  return (bitField0_ & 0x00000100) != 0;
}
/**
 * Returns {@code drop_fake_quant} (field 7): ignore and discard FakeQuant
 * nodes, e.g. to generate plain float code from a quantized graph.
 */
public boolean getDropFakeQuant() {
  return dropFakeQuant_;
}
/**
 * Sets {@code drop_fake_quant} (field 7).
 */
public Builder setDropFakeQuant(boolean v) {
  dropFakeQuant_ = v;
  bitField0_ |= 0x00000100;
  onChanged();
  return this;
}
/**
 * Clears {@code drop_fake_quant} (field 7) back to its default (false).
 */
public Builder clearDropFakeQuant() {
  dropFakeQuant_ = false;
  bitField0_ = (bitField0_ & ~0x00000100);
  onChanged();
  return this;
}
// Work-around flag: lets toco move graph transformations across FakeQuant
// nodes placed at unexpected locations, at the cost of no longer faithfully
// matching inference and training arithmetic.
private boolean reorderAcrossFakeQuant_ ;
/**
 * Whether {@code reorder_across_fake_quant} (field 8) has been explicitly set.
 */
public boolean hasReorderAcrossFakeQuant() {
  return (bitField0_ & 0x00000200) != 0;
}
/**
 * Returns {@code reorder_across_fake_quant} (field 8). Normally FakeQuant
 * nodes are strict boundaries for graph transformations so that quantized
 * inference exactly matches quantized training arithmetic; this temporary
 * work-around permits the transformations needed by graphs with FakeQuant
 * nodes in unexpected locations, sacrificing that exact match.
 */
public boolean getReorderAcrossFakeQuant() {
  return reorderAcrossFakeQuant_;
}
/**
 * Sets {@code reorder_across_fake_quant} (field 8). See the getter for the
 * accuracy trade-off this implies.
 */
public Builder setReorderAcrossFakeQuant(boolean v) {
  reorderAcrossFakeQuant_ = v;
  bitField0_ |= 0x00000200;
  onChanged();
  return this;
}
/**
 * Clears {@code reorder_across_fake_quant} (field 8) back to its default
 * (false).
 */
public Builder clearReorderAcrossFakeQuant() {
  reorderAcrossFakeQuant_ = false;
  bitField0_ = (bitField0_ & ~0x00000200);
  onChanged();
  return this;
}
// When true, TOCO may emit TF Lite Custom operators for unsupported TF ops.
private boolean allowCustomOps_ ;
/**
 * Whether {@code allow_custom_ops} (field 10) has been explicitly set.
 */
public boolean hasAllowCustomOps() {
  return (bitField0_ & 0x00000400) != 0;
}
/**
 * Returns {@code allow_custom_ops} (field 10): if true, allow TOCO to create
 * TF Lite Custom operators for all unsupported Tensorflow ops.
 */
public boolean getAllowCustomOps() {
  return allowCustomOps_;
}
/**
 * Sets {@code allow_custom_ops} (field 10).
 */
public Builder setAllowCustomOps(boolean v) {
  allowCustomOps_ = v;
  bitField0_ |= 0x00000400;
  onChanged();
  return this;
}
/**
 * Clears {@code allow_custom_ops} (field 10) back to its default (false).
 */
public Builder clearAllowCustomOps() {
  allowCustomOps_ = false;
  bitField0_ = (bitField0_ & ~0x00000400);
  onChanged();
  return this;
}
// Only meaningful for TENSORFLOW_GRAPHDEF input; controls whether control
// dependencies are dropped immediately during import.
private boolean dropControlDependency_ ;
/**
 * Whether {@code drop_control_dependency} (field 12) has been explicitly set.
 */
public boolean hasDropControlDependency() {
  return (bitField0_ & 0x00000800) != 0;
}
/**
 * Returns {@code drop_control_dependency} (field 12). Applies only when the
 * input format is TENSORFLOW_GRAPHDEF. If true, control dependencies are
 * dropped immediately during import. If not set, the default is false when
 * the output format is TENSORFLOW_GRAPHDEF and true otherwise.
 */
public boolean getDropControlDependency() {
  return dropControlDependency_;
}
/**
 * Sets {@code drop_control_dependency} (field 12).
 */
public Builder setDropControlDependency(boolean v) {
  dropControlDependency_ = v;
  bitField0_ |= 0x00000800;
  onChanged();
  return this;
}
/**
 * Clears {@code drop_control_dependency} (field 12), restoring the
 * format-dependent default behavior described on the getter.
 */
public Builder clearDropControlDependency() {
  dropControlDependency_ = false;
  bitField0_ = (bitField0_ & ~0x00000800);
  onChanged();
  return this;
}
// Debug switch disabling fusion of known recurrent-cell subgraphs (LSTMs).
private boolean debugDisableRecurrentCellFusion_ ;
/**
 * Whether {@code debug_disable_recurrent_cell_fusion} (field 13) has been
 * explicitly set.
 */
public boolean hasDebugDisableRecurrentCellFusion() {
  return (bitField0_ & 0x00001000) != 0;
}
/**
 * Returns {@code debug_disable_recurrent_cell_fusion} (field 13): disables
 * transformations that fuse subgraphs such as known LSTMs (not all LSTMs are
 * identified).
 */
public boolean getDebugDisableRecurrentCellFusion() {
  return debugDisableRecurrentCellFusion_;
}
/**
 * Sets {@code debug_disable_recurrent_cell_fusion} (field 13).
 */
public Builder setDebugDisableRecurrentCellFusion(boolean v) {
  debugDisableRecurrentCellFusion_ = v;
  bitField0_ |= 0x00001000;
  onChanged();
  return this;
}
/**
 * Clears {@code debug_disable_recurrent_cell_fusion} (field 13) back to its
 * default (false).
 */
public Builder clearDebugDisableRecurrentCellFusion() {
  debugDisableRecurrentCellFusion_ = false;
  bitField0_ = (bitField0_ & ~0x00001000);
  onChanged();
  return this;
}
// Propagates FakeQuantWithMinMaxArgs.num_bits to adjust quantized array data
// types across the graph; may change input/output array data types.
private boolean propagateFakeQuantNumBits_ ;
/**
 * Whether {@code propagate_fake_quant_num_bits} (field 14) has been
 * explicitly set.
 */
public boolean hasPropagateFakeQuantNumBits() {
  return (bitField0_ & 0x00002000) != 0;
}
/**
 * Returns {@code propagate_fake_quant_num_bits} (field 14): use the
 * FakeQuantWithMinMaxArgs.num_bits attribute to adjust quantized array data
 * types throughout the graph. The graph must be properly annotated with
 * FakeQuant* ops on at least the edges. Input and output array data types may
 * change because of this propagation, so users must query the final
 * data_type values.
 */
public boolean getPropagateFakeQuantNumBits() {
  return propagateFakeQuantNumBits_;
}
/**
 * Sets {@code propagate_fake_quant_num_bits} (field 14). See the getter for
 * the annotation requirements and data-type consequences.
 */
public Builder setPropagateFakeQuantNumBits(boolean v) {
  propagateFakeQuantNumBits_ = v;
  bitField0_ |= 0x00002000;
  onChanged();
  return this;
}
/**
 * Clears {@code propagate_fake_quant_num_bits} (field 14) back to its default
 * (false).
 */
public Builder clearPropagateFakeQuantNumBits() {
  propagateFakeQuantNumBits_ = false;
  bitField0_ = (bitField0_ & ~0x00002000);
  onChanged();
  return this;
}
// Permits nudging uint8 weight values of 0 to 1 so fast GEMM kernels that
// forbid 0 can be used, with moderate inaccuracy.
private boolean allowNudgingWeightsToUseFastGemmKernel_ ;
/**
 * Whether {@code allow_nudging_weights_to_use_fast_gemm_kernel} (field 17)
 * has been explicitly set.
 */
public boolean hasAllowNudgingWeightsToUseFastGemmKernel() {
  return (bitField0_ & 0x00004000) != 0;
}
/**
 * Returns {@code allow_nudging_weights_to_use_fast_gemm_kernel} (field 17).
 * Some fast uint8 GEMM kernels require uint8 weights to avoid the value 0;
 * this flag allows nudging such weights to 1, with moderate inaccuracy.
 */
public boolean getAllowNudgingWeightsToUseFastGemmKernel() {
  return allowNudgingWeightsToUseFastGemmKernel_;
}
/**
 * Sets {@code allow_nudging_weights_to_use_fast_gemm_kernel} (field 17).
 */
public Builder setAllowNudgingWeightsToUseFastGemmKernel(boolean v) {
  allowNudgingWeightsToUseFastGemmKernel_ = v;
  bitField0_ |= 0x00004000;
  onChanged();
  return this;
}
/**
 * Clears {@code allow_nudging_weights_to_use_fast_gemm_kernel} (field 17)
 * back to its default (false).
 */
public Builder clearAllowNudgingWeightsToUseFastGemmKernel() {
  allowNudgingWeightsToUseFastGemmKernel_ = false;
  bitField0_ = (bitField0_ & ~0x00004000);
  onChanged();
  return this;
}
// Constant arrays below this byte size are not deduplicated (proto default 64).
private long dedupeArrayMinSizeBytes_ = 64L;
/**
 * Whether {@code dedupe_array_min_size_bytes} (field 18) has been explicitly
 * set.
 */
public boolean hasDedupeArrayMinSizeBytes() {
  return (bitField0_ & 0x00008000) != 0;
}
/**
 * Returns {@code dedupe_array_min_size_bytes} (field 18): the minimum size of
 * constant arrays to deduplicate; smaller arrays are left as-is. Defaults
 * to 64.
 */
public long getDedupeArrayMinSizeBytes() {
  return dedupeArrayMinSizeBytes_;
}
/**
 * Sets {@code dedupe_array_min_size_bytes} (field 18).
 */
public Builder setDedupeArrayMinSizeBytes(long v) {
  dedupeArrayMinSizeBytes_ = v;
  bitField0_ |= 0x00008000;
  onChanged();
  return this;
}
/**
 * Clears {@code dedupe_array_min_size_bytes} (field 18) back to its proto
 * default of 64.
 */
public Builder clearDedupeArrayMinSizeBytes() {
  dedupeArrayMinSizeBytes_ = 64L;
  bitField0_ = (bitField0_ & ~0x00008000);
  onChanged();
  return this;
}
// Splits LSTM inputs from 5 to 18 tensors for TFLite output (proto default
// true); ignored for non-TFLite output formats.
private boolean splitTfliteLstmInputs_ = true;
/**
 * Whether {@code split_tflite_lstm_inputs} (field 19) has been explicitly set.
 */
public boolean hasSplitTfliteLstmInputs() {
  return (bitField0_ & 0x00010000) != 0;
}
/**
 * Returns {@code split_tflite_lstm_inputs} (field 19): split the LSTM inputs
 * from 5 tensors to 18 tensors for TFLite. Ignored if the output format is
 * not TFLite. Defaults to true.
 */
public boolean getSplitTfliteLstmInputs() {
  return splitTfliteLstmInputs_;
}
/**
 * Sets {@code split_tflite_lstm_inputs} (field 19).
 */
public Builder setSplitTfliteLstmInputs(boolean v) {
  splitTfliteLstmInputs_ = v;
  bitField0_ |= 0x00010000;
  onChanged();
  return this;
}
/**
 * Clears {@code split_tflite_lstm_inputs} (field 19) back to its proto
 * default of true.
 */
public Builder clearSplitTfliteLstmInputs() {
  splitTfliteLstmInputs_ = true;
  bitField0_ = (bitField0_ & ~0x00010000);
  onChanged();
  return this;
}
// DEPRECATED in favor of post_training_quantize: stores weights quantized
// followed by dequantize ops; computation stays in float.
private boolean quantizeWeights_ ;
/**
 * Whether {@code quantize_weights} (field 20) has been explicitly set.
 */
public boolean hasQuantizeWeights() {
  return (bitField0_ & 0x00020000) != 0;
}
/**
 * Returns {@code quantize_weights} (field 20): store weights as quantized
 * weights followed by dequantize operations. Computation is still done in
 * float, but model size is reduced (at the cost of accuracy and latency).
 * DEPRECATED: please use post_training_quantize instead.
 */
public boolean getQuantizeWeights() {
  return quantizeWeights_;
}
/**
 * Sets {@code quantize_weights} (field 20). DEPRECATED: prefer
 * {@code setPostTrainingQuantize}.
 */
public Builder setQuantizeWeights(boolean v) {
  quantizeWeights_ = v;
  bitField0_ |= 0x00020000;
  onChanged();
  return this;
}
/**
 * Clears {@code quantize_weights} (field 20) back to its default (false).
 */
public Builder clearQuantizeWeights() {
  quantizeWeights_ = false;
  bitField0_ = (bitField0_ & ~0x00020000);
  onChanged();
  return this;
}
// Directory for dumping GraphViz .dot files at the various processing stages.
// Stored as either a java.lang.String or a com.google.protobuf.ByteString;
// the accessors lazily convert and cache between the two representations.
private java.lang.Object dumpGraphvizDir_ = "";
/**
 * Whether {@code dump_graphviz_dir} (field 24) has been explicitly set.
 */
public boolean hasDumpGraphvizDir() {
  return (bitField0_ & 0x00040000) != 0;
}
/**
 * Returns {@code dump_graphviz_dir} (field 24) as a String: the full filepath
 * of the folder to dump GraphViz .dot files at various stages of processing.
 * Preferred over --output_format=GRAPHVIZ_DOT in order to keep the
 * requirements of the output file. If the field is currently held as a
 * ByteString it is decoded as UTF-8, and the decoded String is cached back
 * only when the bytes were valid UTF-8.
 */
public java.lang.String getDumpGraphvizDir() {
  java.lang.Object ref = dumpGraphvizDir_;
  if (ref instanceof java.lang.String) {
    return (java.lang.String) ref;
  }
  com.google.protobuf.ByteString bytes = (com.google.protobuf.ByteString) ref;
  java.lang.String decoded = bytes.toStringUtf8();
  if (bytes.isValidUtf8()) {
    // Cache the String form only when the round-trip is lossless.
    dumpGraphvizDir_ = decoded;
  }
  return decoded;
}
/**
 * Returns {@code dump_graphviz_dir} (field 24) as a UTF-8 ByteString. If the
 * field is currently held as a String, the encoded bytes are cached back into
 * the field.
 */
public com.google.protobuf.ByteString getDumpGraphvizDirBytes() {
  java.lang.Object ref = dumpGraphvizDir_;
  if (!(ref instanceof String)) {
    return (com.google.protobuf.ByteString) ref;
  }
  com.google.protobuf.ByteString encoded =
      com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
  dumpGraphvizDir_ = encoded;
  return encoded;
}
/**
 * Sets {@code dump_graphviz_dir} (field 24) from a String.
 *
 * @throws NullPointerException if {@code value} is null
 */
public Builder setDumpGraphvizDir(
    java.lang.String value) {
  if (value == null) {
    throw new NullPointerException();
  }
  dumpGraphvizDir_ = value;
  bitField0_ |= 0x00040000;
  onChanged();
  return this;
}
/**
 * Clears {@code dump_graphviz_dir} (field 24) back to the default instance's
 * value (the empty string).
 */
public Builder clearDumpGraphvizDir() {
  bitField0_ = (bitField0_ & ~0x00040000);
  dumpGraphvizDir_ = getDefaultInstance().getDumpGraphvizDir();
  onChanged();
  return this;
}
/**
 * Sets {@code dump_graphviz_dir} (field 24) from raw bytes, stored without
 * UTF-8 validation.
 *
 * @throws NullPointerException if {@code value} is null
 */
public Builder setDumpGraphvizDirBytes(
    com.google.protobuf.ByteString value) {
  if (value == null) {
    throw new NullPointerException();
  }
  dumpGraphvizDir_ = value;
  bitField0_ |= 0x00040000;
  onChanged();
  return this;
}
// When true, the graph is dumped after every graph transformation.
private boolean dumpGraphvizIncludeVideo_ ;
/**
 * Whether {@code dump_graphviz_include_video} (field 25) has been explicitly
 * set.
 */
public boolean hasDumpGraphvizIncludeVideo() {
  return (bitField0_ & 0x00080000) != 0;
}
/**
 * Returns {@code dump_graphviz_include_video} (field 25): whether to dump the
 * graph after every graph transformation.
 */
public boolean getDumpGraphvizIncludeVideo() {
  return dumpGraphvizIncludeVideo_;
}
/**
 * Sets {@code dump_graphviz_include_video} (field 25).
 */
public Builder setDumpGraphvizIncludeVideo(boolean v) {
  dumpGraphvizIncludeVideo_ = v;
  bitField0_ |= 0x00080000;
  onChanged();
  return this;
}
/**
 * Clears {@code dump_graphviz_include_video} (field 25) back to its default
 * (false).
 */
public Builder clearDumpGraphvizIncludeVideo() {
  dumpGraphvizIncludeVideo_ = false;
  bitField0_ = (bitField0_ & ~0x00080000);
  onChanged();
  return this;
}
// Quantize the weights of the converted float model, trading accuracy for
// smaller size and latency improvements.
private boolean postTrainingQuantize_ ;
/**
 * Whether {@code post_training_quantize} (field 26) has been explicitly set.
 */
public boolean hasPostTrainingQuantize() {
  return (bitField0_ & 0x00100000) != 0;
}
/**
 * Returns {@code post_training_quantize} (field 26): whether to quantize the
 * weights of the converted float model. Model size is reduced and there are
 * latency improvements, at the cost of accuracy.
 */
public boolean getPostTrainingQuantize() {
  return postTrainingQuantize_;
}
/**
 * Sets {@code post_training_quantize} (field 26).
 */
public Builder setPostTrainingQuantize(boolean v) {
  postTrainingQuantize_ = v;
  bitField0_ |= 0x00100000;
  onChanged();
  return this;
}
/**
 * Clears {@code post_training_quantize} (field 26) back to its default
 * (false).
 */
public Builder clearPostTrainingQuantize() {
  postTrainingQuantize_ = false;
  bitField0_ = (bitField0_ & ~0x00100000);
  onChanged();
  return this;
}
// Experimental: convert unsupported ops to TFLite Flex ops. Should always be
// used together with allow_custom_ops.
private boolean allowFlexOps_ ;
/**
 * Whether {@code allow_flex_ops} (field 27) has been explicitly set.
 */
public boolean hasAllowFlexOps() {
  return (bitField0_ & 0x00200000) != 0;
}
/**
 * Returns {@code allow_flex_ops} (field 27): when enabled, unsupported ops
 * are converted to TFLite Flex ops. {@code allow_flex_ops} should always be
 * used with {@code allow_custom_ops}. WARNING: experimental interface,
 * subject to change.
 */
public boolean getAllowFlexOps() {
  return allowFlexOps_;
}
/**
 * Sets {@code allow_flex_ops} (field 27). Experimental; see the getter.
 */
public Builder setAllowFlexOps(boolean v) {
  allowFlexOps_ = v;
  bitField0_ |= 0x00200000;
  onChanged();
  return this;
}
/**
 * Clears {@code allow_flex_ops} (field 27) back to its default (false).
 */
public Builder clearAllowFlexOps() {
  allowFlexOps_ = false;
  bitField0_ = (bitField0_ & ~0x00200000);
  onChanged();
  return this;
}
private boolean forceFlexOps_ ;
/**
*
* When enabled, all TensorFlow ops will be converted to TFLite Flex
* ops directly. This will force `allow_flex_ops` to true.
* `force_flex_ops` should always be used with `allow_flex_ops`.
* WARNING: Experimental interface, subject to change
*
*
* optional bool force_flex_ops = 28 [default = false];
*/
public boolean hasForceFlexOps() {
return ((bitField0_ & 0x00400000) == 0x00400000);
}
/**
 * When enabled, all TensorFlow ops will be converted to TFLite Flex
 * ops directly. This will force `allow_flex_ops` to true.
 * `force_flex_ops` should always be used with `allow_flex_ops`.
 * WARNING: Experimental interface, subject to change
 *
 * optional bool force_flex_ops = 28 [default = false];
 *
 * @return the current flag value (proto default: false)
 */
public boolean getForceFlexOps() {
  return this.forceFlexOps_;
}
/**
 * When enabled, all TensorFlow ops will be converted to TFLite Flex
 * ops directly. This will force `allow_flex_ops` to true.
 * `force_flex_ops` should always be used with `allow_flex_ops`.
 * WARNING: Experimental interface, subject to change
 *
 * optional bool force_flex_ops = 28 [default = false];
 *
 * @param value the new flag value
 * @return this builder, for call chaining
 */
public Builder setForceFlexOps(boolean value) {
  this.forceFlexOps_ = value;
  bitField0_ |= 0x00400000;  // record that field 28 was explicitly set
  onChanged();
  return this;
}
/**
 * When enabled, all TensorFlow ops will be converted to TFLite Flex
 * ops directly. This will force `allow_flex_ops` to true.
 * `force_flex_ops` should always be used with `allow_flex_ops`.
 * WARNING: Experimental interface, subject to change
 *
 * optional bool force_flex_ops = 28 [default = false];
 *
 * @return this builder, for call chaining
 */
public Builder clearForceFlexOps() {
  bitField0_ &= ~0x00400000;   // drop the has-bit for field 28
  this.forceFlexOps_ = false;  // restore the proto default
  onChanged();
  return this;
}
// Replaces this builder's unknown-field set wholesale by delegating to the
// superclass implementation.
@java.lang.Override
public final Builder setUnknownFields(
final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
// Merges the supplied unknown fields into this builder's existing set by
// delegating to the superclass implementation.
@java.lang.Override
public final Builder mergeUnknownFields(
final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:toco.TocoFlags)
}
// @@protoc_insertion_point(class_scope:toco.TocoFlags)
private static final toco.TocoFlagsOuterClass.TocoFlags DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new toco.TocoFlagsOuterClass.TocoFlags();
}
public static toco.TocoFlagsOuterClass.TocoFlags getDefaultInstance() {
return DEFAULT_INSTANCE;
}
/**
 * Parser for {@code toco.TocoFlags} messages. The generics were lost in a
 * raw-type form ({@code Parser}/{@code AbstractParser}); restored here to
 * {@code Parser&lt;TocoFlags&gt;} to match the standard protobuf-java
 * generated pattern and avoid unchecked raw-type usage.
 *
 * @deprecated exposed only for compatibility with older generated code;
 *     use {@link #parser()} instead.
 */
@java.lang.Deprecated public static final com.google.protobuf.Parser<TocoFlags>
    PARSER = new com.google.protobuf.AbstractParser<TocoFlags>() {
  @java.lang.Override
  public TocoFlags parsePartialFrom(
      com.google.protobuf.CodedInputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    // Delegates to the parsing constructor defined earlier in this class.
    return new TocoFlags(input, extensionRegistry);
  }
};
/**
 * Returns the parser for {@code toco.TocoFlags} messages. Return type
 * restored from the raw {@code Parser} (generics stripped during
 * extraction) to the generated {@code Parser&lt;TocoFlags&gt;} signature —
 * a covariant, backward-compatible change.
 */
public static com.google.protobuf.Parser<TocoFlags> parser() {
  return PARSER;
}
/**
 * Returns this message type's parser. Return type restored from the raw
 * {@code Parser} to {@code Parser&lt;TocoFlags&gt;}, the covariant
 * signature emitted by the protobuf compiler.
 */
@java.lang.Override
public com.google.protobuf.Parser<TocoFlags> getParserForType() {
  return PARSER;
}
// Returns the shared default instance; for this type it is the same object
// as getDefaultInstance().
@java.lang.Override
public toco.TocoFlagsOuterClass.TocoFlags getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}
// Message descriptor for toco.TocoFlags; assigned (blank-final) in the
// static initializer below once the file descriptor has been built.
private static final com.google.protobuf.Descriptors.Descriptor
internal_static_toco_TocoFlags_descriptor;
// Reflection table mapping toco.TocoFlags proto fields to their generated
// accessors; also assigned in the static initializer below.
private static final
com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_toco_TocoFlags_fieldAccessorTable;
// Returns the file-level descriptor for toco_flags.proto.
public static com.google.protobuf.Descriptors.FileDescriptor
getDescriptor() {
return descriptor;
}
private static com.google.protobuf.Descriptors.FileDescriptor
descriptor;
static {
// The serialized FileDescriptorProto for toco_flags.proto, embedded as
// concatenated string-literal chunks. These bytes must not be edited by
// hand — they are decoded verbatim to rebuild the descriptor at runtime.
java.lang.String[] descriptorData = {
"\n-tensorflow/contrib/lite/toco/toco_flag" +
"s.proto\022\004toco\032(tensorflow/contrib/lite/t" +
"oco/types.proto\"\313\006\n\tTocoFlags\022&\n\014input_f" +
"ormat\030\001 \001(\0162\020.toco.FileFormat\022\'\n\routput_" +
"format\030\002 \001(\0162\020.toco.FileFormat\022.\n\024infere" +
"nce_input_type\030\013 \001(\0162\020.toco.IODataType\022(" +
"\n\016inference_type\030\004 \001(\0162\020.toco.IODataType" +
"\022\032\n\022default_ranges_min\030\005 \001(\002\022\032\n\022default_" +
"ranges_max\030\006 \001(\002\022 \n\030default_int16_ranges" +
"_min\030\017 \001(\002\022 \n\030default_int16_ranges_max\030\020" +
" \001(\002\022\027\n\017drop_fake_quant\030\007 \001(\010\022!\n\031reorder" +
"_across_fake_quant\030\010 \001(\010\022\030\n\020allow_custom" +
"_ops\030\n \001(\010\022\037\n\027drop_control_dependency\030\014 " +
"\001(\010\022+\n#debug_disable_recurrent_cell_fusi" +
"on\030\r \001(\010\022%\n\035propagate_fake_quant_num_bit" +
"s\030\016 \001(\010\0225\n-allow_nudging_weights_to_use_" +
"fast_gemm_kernel\030\021 \001(\010\022\'\n\033dedupe_array_m" +
"in_size_bytes\030\022 \001(\003:\00264\022&\n\030split_tflite_" +
"lstm_inputs\030\023 \001(\010:\004true\022\037\n\020quantize_weig" +
"hts\030\024 \001(\010:\005false\022\031\n\021dump_graphviz_dir\030\030 " +
"\001(\t\022#\n\033dump_graphviz_include_video\030\031 \001(\010" +
"\022%\n\026post_training_quantize\030\032 \001(\010:\005false\022" +
"\035\n\016allow_flex_ops\030\033 \001(\010:\005false\022\035\n\016force_" +
"flex_ops\030\034 \001(\010:\005false*\\\n\nFileFormat\022\027\n\023F" +
"ILE_FORMAT_UNKNOWN\020\000\022\027\n\023TENSORFLOW_GRAPH" +
"DEF\020\001\022\n\n\006TFLITE\020\002\022\020\n\014GRAPHVIZ_DOT\020\003"
};
// Callback that captures the freshly built FileDescriptor into the static
// `descriptor` field; returning null registers no extensions for this file.
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor. InternalDescriptorAssigner() {
public com.google.protobuf.ExtensionRegistry assignDescriptors(
com.google.protobuf.Descriptors.FileDescriptor root) {
descriptor = root;
return null;
}
};
// Build this file's descriptor; toco/types.proto is its sole dependency.
com.google.protobuf.Descriptors.FileDescriptor
.internalBuildGeneratedFileFrom(descriptorData,
new com.google.protobuf.Descriptors.FileDescriptor[] {
toco.Types.getDescriptor(),
}, assigner);
// TocoFlags is the first (index 0) message declared in the .proto file.
internal_static_toco_TocoFlags_descriptor =
getDescriptor().getMessageTypes().get(0);
// Accessor table pairing each proto field, in declaration order, with the
// CamelCase name stem of its generated accessors.
internal_static_toco_TocoFlags_fieldAccessorTable = new
com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_toco_TocoFlags_descriptor,
new java.lang.String[] { "InputFormat", "OutputFormat", "InferenceInputType", "InferenceType", "DefaultRangesMin", "DefaultRangesMax", "DefaultInt16RangesMin", "DefaultInt16RangesMax", "DropFakeQuant", "ReorderAcrossFakeQuant", "AllowCustomOps", "DropControlDependency", "DebugDisableRecurrentCellFusion", "PropagateFakeQuantNumBits", "AllowNudgingWeightsToUseFastGemmKernel", "DedupeArrayMinSizeBytes", "SplitTfliteLstmInputs", "QuantizeWeights", "DumpGraphvizDir", "DumpGraphvizIncludeVideo", "PostTrainingQuantize", "AllowFlexOps", "ForceFlexOps", });
// Called for its side effect only: it forces static initialization of the
// dependency's descriptor holder class.
toco.Types.getDescriptor();
}
// @@protoc_insertion_point(outer_class_scope)
}
// © 2015 - 2025 Weber Informatics LLC | Privacy Policy
// (Site footer captured during web extraction; not part of the generated
// source. Commented out so the file remains valid Java.)