/**
*
* L1 is the default level.
* Optimization performed at L1 :
* 1. Common subexpression elimination
* 2. Constant folding
*
*
* L1 = 0;
*/
L1(0),
/**
*
* No optimizations
*
*
* L0 = -1;
*/
L0(-1),
// Sentinel for wire values not known to this version of the generated code;
// getNumber() rejects it (see below).
UNRECOGNIZED(-1),
;
// Numeric wire values mirroring the enum constants above.
/**
*
* L1 is the default level.
* Optimization performed at L1 :
* 1. Common subexpression elimination
* 2. Constant folding
*
*
* L1 = 0;
*/
public static final int L1_VALUE = 0;
/**
*
* No optimizations
*
*
* L0 = -1;
*/
public static final int L0_VALUE = -1;
/**
 * Returns the numeric proto value of this constant.
 *
 * @throws java.lang.IllegalArgumentException if this is {@code UNRECOGNIZED},
 *     which carries no defined wire value
 */
public final int getNumber() {
  if (this != UNRECOGNIZED) {
    return value;
  }
  throw new java.lang.IllegalArgumentException(
      "Can't get the number of an unknown enum value.");
}
/**
 * Maps a numeric wire value to its enum constant.
 *
 * @deprecated Use {@link #forNumber(int)} instead.
 */
@java.lang.Deprecated
public static Level valueOf(int value) {
  return Level.forNumber(value);
}
/**
 * Maps a numeric wire value to its enum constant.
 *
 * @return the matching constant, or {@code null} when the value is unknown
 */
public static Level forNumber(int value) {
  if (value == 0) {
    return L1;
  }
  if (value == -1) {
    return L0;
  }
  return null;
}
/**
 * Returns the shared number-to-enum lookup used by the protobuf runtime.
 */
public static org.nd4j.shade.protobuf.Internal.EnumLiteMap<Level>
    internalGetValueMap() {
  return internalValueMap;
}
// Parameterized with <Level>: the original used the raw EnumLiteMap type,
// which compiles only with unchecked warnings and loses type safety.
private static final org.nd4j.shade.protobuf.Internal.EnumLiteMap<
    Level> internalValueMap =
        new org.nd4j.shade.protobuf.Internal.EnumLiteMap<Level>() {
          public Level findValueByNumber(int number) {
            return Level.forNumber(number);
          }
        };
/** Descriptor for this specific enum value (looked up by ordinal). */
public final org.nd4j.shade.protobuf.Descriptors.EnumValueDescriptor
    getValueDescriptor() {
  return getDescriptor().getValues().get(ordinal());
}
/** Descriptor of the enum type this value belongs to. */
public final org.nd4j.shade.protobuf.Descriptors.EnumDescriptor
    getDescriptorForType() {
  return getDescriptor();
}
/** Enum descriptor: the first enum type declared on OptimizerOptions. */
public static final org.nd4j.shade.protobuf.Descriptors.EnumDescriptor
    getDescriptor() {
  return org.tensorflow.framework.OptimizerOptions.getDescriptor()
      .getEnumTypes()
      .get(0);
}
// Cached copy of values(): values() allocates a fresh array on every call.
private static final Level[] VALUES = values();
/**
 * Resolves an EnumValueDescriptor to the matching enum constant.
 *
 * @throws java.lang.IllegalArgumentException if the descriptor belongs to a
 *     different enum type
 */
public static Level valueOf(
    org.nd4j.shade.protobuf.Descriptors.EnumValueDescriptor desc) {
  if (getDescriptor() != desc.getType()) {
    throw new java.lang.IllegalArgumentException(
        "EnumValueDescriptor is not for this type.");
  }
  int index = desc.getIndex();
  return index == -1 ? UNRECOGNIZED : VALUES[index];
}
// Numeric proto value backing this constant.
private final int value;
private Level(int value) {
  this.value = value;
}
// @@protoc_insertion_point(enum_scope:tensorflow.OptimizerOptions.Level)
}
// Numeric wire values for the GlobalJitLevel enum constants.
/**
*
* Control the use of the compiler/jit. Experimental.
*
* The following settings turn on compilation, with higher values being
* more aggressive. Higher values may reduce opportunities for parallelism
* and may use more memory. (At present, there is no distinction, but this
* is expected to change.)
*
* Default setting ("off" now, but later expected to be "on")
*
*
* DEFAULT = 0;
*/
public static final int DEFAULT_VALUE = 0;
/**
* OFF = -1;
*/
public static final int OFF_VALUE = -1;
/**
*
* The following settings turn on compilation, with higher values being
* more aggressive. Higher values may reduce opportunities for parallelism
* and may use more memory. (At present, there is no distinction, but this
* is expected to change.)
*
*
* ON_1 = 1;
*/
public static final int ON_1_VALUE = 1;
/**
* ON_2 = 2;
*/
public static final int ON_2_VALUE = 2;
/**
 * Returns the numeric proto value of this constant.
 *
 * @throws java.lang.IllegalArgumentException if this is {@code UNRECOGNIZED},
 *     which carries no defined wire value
 */
public final int getNumber() {
  if (this != UNRECOGNIZED) {
    return value;
  }
  throw new java.lang.IllegalArgumentException(
      "Can't get the number of an unknown enum value.");
}
/**
 * Maps a numeric wire value to its enum constant.
 *
 * @deprecated Use {@link #forNumber(int)} instead.
 */
@java.lang.Deprecated
public static GlobalJitLevel valueOf(int value) {
  return GlobalJitLevel.forNumber(value);
}
/**
 * Maps a numeric wire value to its enum constant.
 *
 * @return the matching constant, or {@code null} when the value is unknown
 */
public static GlobalJitLevel forNumber(int value) {
  if (value == 0) {
    return DEFAULT;
  }
  if (value == -1) {
    return OFF;
  }
  if (value == 1) {
    return ON_1;
  }
  if (value == 2) {
    return ON_2;
  }
  return null;
}
/**
 * Returns the shared number-to-enum lookup used by the protobuf runtime.
 */
public static org.nd4j.shade.protobuf.Internal.EnumLiteMap<GlobalJitLevel>
    internalGetValueMap() {
  return internalValueMap;
}
// Parameterized with <GlobalJitLevel>: the original used the raw EnumLiteMap
// type, which compiles only with unchecked warnings and loses type safety.
private static final org.nd4j.shade.protobuf.Internal.EnumLiteMap<
    GlobalJitLevel> internalValueMap =
        new org.nd4j.shade.protobuf.Internal.EnumLiteMap<GlobalJitLevel>() {
          public GlobalJitLevel findValueByNumber(int number) {
            return GlobalJitLevel.forNumber(number);
          }
        };
/** Descriptor for this specific enum value (looked up by ordinal). */
public final org.nd4j.shade.protobuf.Descriptors.EnumValueDescriptor
    getValueDescriptor() {
  return getDescriptor().getValues().get(ordinal());
}
/** Descriptor of the enum type this value belongs to. */
public final org.nd4j.shade.protobuf.Descriptors.EnumDescriptor
    getDescriptorForType() {
  return getDescriptor();
}
/** Enum descriptor: the second enum type declared on OptimizerOptions. */
public static final org.nd4j.shade.protobuf.Descriptors.EnumDescriptor
    getDescriptor() {
  return org.tensorflow.framework.OptimizerOptions.getDescriptor()
      .getEnumTypes()
      .get(1);
}
// Cached copy of values(): values() allocates a fresh array on every call.
private static final GlobalJitLevel[] VALUES = values();
/**
 * Resolves an EnumValueDescriptor to the matching enum constant.
 *
 * @throws java.lang.IllegalArgumentException if the descriptor belongs to a
 *     different enum type
 */
public static GlobalJitLevel valueOf(
    org.nd4j.shade.protobuf.Descriptors.EnumValueDescriptor desc) {
  if (getDescriptor() != desc.getType()) {
    throw new java.lang.IllegalArgumentException(
        "EnumValueDescriptor is not for this type.");
  }
  int index = desc.getIndex();
  return index == -1 ? UNRECOGNIZED : VALUES[index];
}
// Numeric proto value backing this constant.
private final int value;
private GlobalJitLevel(int value) {
  this.value = value;
}
// @@protoc_insertion_point(enum_scope:tensorflow.OptimizerOptions.GlobalJitLevel)
}
public static final int DO_COMMON_SUBEXPRESSION_ELIMINATION_FIELD_NUMBER = 1;
// Backing store for the do_common_subexpression_elimination field.
private boolean doCommonSubexpressionElimination_;
/**
*
* If true, optimize the graph using common subexpression elimination.
*
*
* bool do_common_subexpression_elimination = 1;
*/
public boolean getDoCommonSubexpressionElimination() {
return doCommonSubexpressionElimination_;
}
public static final int DO_CONSTANT_FOLDING_FIELD_NUMBER = 2;
// Backing store for the do_constant_folding field.
private boolean doConstantFolding_;
/**
*
* If true, perform constant folding optimization on the graph.
*
*
* bool do_constant_folding = 2;
*/
public boolean getDoConstantFolding() {
return doConstantFolding_;
}
public static final int MAX_FOLDED_CONSTANT_IN_BYTES_FIELD_NUMBER = 6;
// Backing store for the max_folded_constant_in_bytes field.
private long maxFoldedConstantInBytes_;
/**
*
* Constant folding optimization replaces tensors whose values can be
* predetermined, with constant nodes. To avoid inserting too large constants,
* the size of each constant created can be limited. If this value is zero, a
* default limit of 10 MiB will be applied. If constant folding optimization
* is disabled, this value is ignored.
*
*
* int64 max_folded_constant_in_bytes = 6;
*/
public long getMaxFoldedConstantInBytes() {
return maxFoldedConstantInBytes_;
}
public static final int DO_FUNCTION_INLINING_FIELD_NUMBER = 4;
// Backing store for the do_function_inlining field.
private boolean doFunctionInlining_;
/**
*
* If true, perform function inlining on the graph.
*
*
* bool do_function_inlining = 4;
*/
public boolean getDoFunctionInlining() {
return doFunctionInlining_;
}
public static final int OPT_LEVEL_FIELD_NUMBER = 3;
// Raw numeric value of the opt_level enum field.
private int optLevel_;
/**
*
* Overall optimization level. The actual optimizations applied will be the
* logical OR of the flags that this level implies and any flags already set.
*
*
* .tensorflow.OptimizerOptions.Level opt_level = 3;
*/
public int getOptLevelValue() {
return optLevel_;
}
/**
*
* Constant folding optimization replaces tensors whose values can be
* predetermined, with constant nodes. To avoid inserting too large constants,
* the size of each constant created can be limited. If this value is zero, a
* default limit of 10 MiB will be applied. If constant folding optimization
* is disabled, this value is ignored.
*
*
* int64 max_folded_constant_in_bytes = 6;
*/
// NOTE(review): the original Javadoc here mixed in text from the opt_level
// field; restored to the max_folded_constant_in_bytes text. This accessor
// duplicates the one earlier in the file — presumably the Builder-side
// getter with the class boundary lost in extraction; confirm against the
// authentic generated source.
public long getMaxFoldedConstantInBytes() {
return maxFoldedConstantInBytes_;
}
/**
*
* Overall optimization level. The actual optimizations applied will be the
* logical OR of the flags that this level implies and any flags already set.
*
*
* .tensorflow.OptimizerOptions.Level opt_level = 3;
*/
// NOTE(review): the original Javadoc repeated the constant-folding text
// twice; restored to the opt_level text. This accessor duplicates the one
// earlier in the file — presumably the Builder-side getter with the class
// boundary lost in extraction; confirm against the authentic generated
// source.
public int getOptLevelValue() {
return optLevel_;
}
/**
 * Overall optimization level. The actual optimizations applied will be the
 * logical OR of the flags that this level implies and any flags already set.
 *
 * .tensorflow.OptimizerOptions.Level opt_level = 3;
 *
 * @return the enum constant for the stored numeric value, or
 *     {@code Level.UNRECOGNIZED} when the value is unknown
 */
public org.tensorflow.framework.OptimizerOptions.Level getOptLevel() {
  // forNumber is the non-deprecated equivalent of valueOf(int) (valueOf just
  // delegates to it), so calling it directly removes the need for
  // @SuppressWarnings("deprecation").
  org.tensorflow.framework.OptimizerOptions.Level result =
      org.tensorflow.framework.OptimizerOptions.Level.forNumber(optLevel_);
  return result == null
      ? org.tensorflow.framework.OptimizerOptions.Level.UNRECOGNIZED
      : result;
}
/**
 * Sets opt_level from an enum constant.
 *
 * Overall optimization level. The actual optimizations applied will be the
 * logical OR of the flags that this level implies and any flags already set.
 *
 * .tensorflow.OptimizerOptions.Level opt_level = 3;
 *
 * @throws NullPointerException if {@code value} is null
 */
public Builder setOptLevel(org.tensorflow.framework.OptimizerOptions.Level value) {
  if (value == null) {
    throw new NullPointerException();
  }
  final int number = value.getNumber();
  this.optLevel_ = number;
  onChanged();
  return this;
}
/**
 * Resets opt_level to its default numeric value 0 (the L1 level).
 *
 * .tensorflow.OptimizerOptions.Level opt_level = 3;
 */
public Builder clearOptLevel() {
  this.optLevel_ = 0;
  onChanged();
  return this;
}
// Raw numeric value of the global_jit_level enum field (0 = DEFAULT).
private int globalJitLevel_ = 0;
/**
* Returns the raw numeric value without mapping it to the enum.
*
* .tensorflow.OptimizerOptions.GlobalJitLevel global_jit_level = 5;
*/
public int getGlobalJitLevelValue() {
return globalJitLevel_;
}
/**
 * Sets the raw numeric value of global_jit_level; no validation is applied.
 *
 * .tensorflow.OptimizerOptions.GlobalJitLevel global_jit_level = 5;
 */
public Builder setGlobalJitLevelValue(int value) {
  this.globalJitLevel_ = value;
  onChanged();
  return this;
}
/**
 * .tensorflow.OptimizerOptions.GlobalJitLevel global_jit_level = 5;
 *
 * @return the enum constant for the stored numeric value, or
 *     {@code GlobalJitLevel.UNRECOGNIZED} when the value is unknown
 */
public org.tensorflow.framework.OptimizerOptions.GlobalJitLevel getGlobalJitLevel() {
  // forNumber is the non-deprecated equivalent of valueOf(int) (valueOf just
  // delegates to it), so calling it directly removes the need for
  // @SuppressWarnings("deprecation").
  org.tensorflow.framework.OptimizerOptions.GlobalJitLevel result =
      org.tensorflow.framework.OptimizerOptions.GlobalJitLevel.forNumber(globalJitLevel_);
  return result == null
      ? org.tensorflow.framework.OptimizerOptions.GlobalJitLevel.UNRECOGNIZED
      : result;
}
/**
 * Sets global_jit_level from an enum constant.
 *
 * .tensorflow.OptimizerOptions.GlobalJitLevel global_jit_level = 5;
 *
 * @throws NullPointerException if {@code value} is null
 */
public Builder setGlobalJitLevel(org.tensorflow.framework.OptimizerOptions.GlobalJitLevel value) {
  if (value == null) {
    throw new NullPointerException();
  }
  final int number = value.getNumber();
  this.globalJitLevel_ = number;
  onChanged();
  return this;
}
/**
 * Resets global_jit_level to its default numeric value 0 (DEFAULT).
 *
 * .tensorflow.OptimizerOptions.GlobalJitLevel global_jit_level = 5;
 */
public Builder clearGlobalJitLevel() {
  this.globalJitLevel_ = 0;
  onChanged();
  return this;
}
// Delegates unknown-field handling to the generated superclass Builder.
@java.lang.Override
public final Builder setUnknownFields(
final org.nd4j.shade.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final org.nd4j.shade.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:tensorflow.OptimizerOptions)
}
// @@protoc_insertion_point(class_scope:tensorflow.OptimizerOptions)
private static final org.tensorflow.framework.OptimizerOptions DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new org.tensorflow.framework.OptimizerOptions();
}
public static org.tensorflow.framework.OptimizerOptions getDefaultInstance() {
return DEFAULT_INSTANCE;
}
// Parameterized with <OptimizerOptions>: the original used the raw
// Parser/AbstractParser types, which compile only with unchecked warnings.
private static final org.nd4j.shade.protobuf.Parser<OptimizerOptions>
    PARSER = new org.nd4j.shade.protobuf.AbstractParser<OptimizerOptions>() {
      @java.lang.Override
      public OptimizerOptions parsePartialFrom(
          org.nd4j.shade.protobuf.CodedInputStream input,
          org.nd4j.shade.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.nd4j.shade.protobuf.InvalidProtocolBufferException {
        return new OptimizerOptions(input, extensionRegistry);
      }
    };
/** Static parser accessor (typed; the original returned the raw Parser type). */
public static org.nd4j.shade.protobuf.Parser<OptimizerOptions> parser() {
  return PARSER;
}
/** Instance parser accessor required by the MessageLite contract. */
@java.lang.Override
public org.nd4j.shade.protobuf.Parser<OptimizerOptions> getParserForType() {
  return PARSER;
}
// Per-type default instance accessor required by the MessageLite contract.
@java.lang.Override
public org.tensorflow.framework.OptimizerOptions getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}