org.tensorflow.framework.RewriterConfigOrBuilder
// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: tensorflow/core/protobuf/rewriter_config.proto

package org.tensorflow.framework;

public interface RewriterConfigOrBuilder extends
    // @@protoc_insertion_point(interface_extends:tensorflow.RewriterConfig)
    com.google.protobuf.MessageOrBuilder {

  /**
   * Optimize tensor layouts (default is ON)
   * e.g. This will try to use NCHW layout on GPU which is faster.
   * <code>.tensorflow.RewriterConfig.Toggle layout_optimizer = 1;</code>
   */
  int getLayoutOptimizerValue();

  /**
   * Optimize tensor layouts (default is ON)
   * e.g. This will try to use NCHW layout on GPU which is faster.
   * <code>.tensorflow.RewriterConfig.Toggle layout_optimizer = 1;</code>
   */
  org.tensorflow.framework.RewriterConfig.Toggle getLayoutOptimizer();

  /**
   * Fold constants (default is ON)
   * Statically infer the value of tensors when possible, and materialize the
   * result using constants.
   * <code>.tensorflow.RewriterConfig.Toggle constant_folding = 3;</code>
   */
  int getConstantFoldingValue();

  /**
   * Fold constants (default is ON)
   * Statically infer the value of tensors when possible, and materialize the
   * result using constants.
   * <code>.tensorflow.RewriterConfig.Toggle constant_folding = 3;</code>
   */
  org.tensorflow.framework.RewriterConfig.Toggle getConstantFolding();

  /**
   * Shape optimizations (default is ON)
   * Simplify computations made on shapes.
   * <code>.tensorflow.RewriterConfig.Toggle shape_optimization = 13;</code>
   */
  int getShapeOptimizationValue();

  /**
   * Shape optimizations (default is ON)
   * Simplify computations made on shapes.
   * <code>.tensorflow.RewriterConfig.Toggle shape_optimization = 13;</code>
   */
  org.tensorflow.framework.RewriterConfig.Toggle getShapeOptimization();

  /**
   * Remapping (default is ON)
   * Remap subgraphs onto more efficient implementations.
   * <code>.tensorflow.RewriterConfig.Toggle remapping = 14;</code>
   */
  int getRemappingValue();

  /**
   * Remapping (default is ON)
   * Remap subgraphs onto more efficient implementations.
   * <code>.tensorflow.RewriterConfig.Toggle remapping = 14;</code>
   */
  org.tensorflow.framework.RewriterConfig.Toggle getRemapping();

  /**
   * Arithmetic optimizations (default is ON)
   * e.g. Simplify arithmetic ops; merge ops with same value (like constants).
   * <code>.tensorflow.RewriterConfig.Toggle arithmetic_optimization = 7;</code>
   */
  int getArithmeticOptimizationValue();

  /**
   * Arithmetic optimizations (default is ON)
   * e.g. Simplify arithmetic ops; merge ops with same value (like constants).
   * <code>.tensorflow.RewriterConfig.Toggle arithmetic_optimization = 7;</code>
   */
  org.tensorflow.framework.RewriterConfig.Toggle getArithmeticOptimization();

  /**
   * Control dependency optimizations (default is ON).
   * Remove redundant control dependencies, which may enable other optimization.
   * <code>.tensorflow.RewriterConfig.Toggle dependency_optimization = 8;</code>
   */
  int getDependencyOptimizationValue();

  /**
   * Control dependency optimizations (default is ON).
   * Remove redundant control dependencies, which may enable other optimization.
   * <code>.tensorflow.RewriterConfig.Toggle dependency_optimization = 8;</code>
   */
  org.tensorflow.framework.RewriterConfig.Toggle getDependencyOptimization();

  /**
   * Loop optimizations (default is ON).
   * <code>.tensorflow.RewriterConfig.Toggle loop_optimization = 9;</code>
   */
  int getLoopOptimizationValue();

  /**
   * Loop optimizations (default is ON).
   * <code>.tensorflow.RewriterConfig.Toggle loop_optimization = 9;</code>
   */
  org.tensorflow.framework.RewriterConfig.Toggle getLoopOptimization();

  /**
   * Function optimizations (default is ON).
   * <code>.tensorflow.RewriterConfig.Toggle function_optimization = 10;</code>
   */
  int getFunctionOptimizationValue();

  /**
   * Function optimizations (default is ON).
   * <code>.tensorflow.RewriterConfig.Toggle function_optimization = 10;</code>
   */
  org.tensorflow.framework.RewriterConfig.Toggle getFunctionOptimization();

  /**
   * Strips debug-related nodes from the graph (off by default).
   * <code>.tensorflow.RewriterConfig.Toggle debug_stripper = 11;</code>
   */
  int getDebugStripperValue();

  /**
   * Strips debug-related nodes from the graph (off by default).
   * <code>.tensorflow.RewriterConfig.Toggle debug_stripper = 11;</code>
   */
  org.tensorflow.framework.RewriterConfig.Toggle getDebugStripper();

  /**
   * If true, don't remove unnecessary ops from the graph
   * <code>bool disable_model_pruning = 2;</code>
   */
  boolean getDisableModelPruning();

  /**
   * Try to allocate some independent Op outputs contiguously in order to
   * merge or eliminate downstream Ops (off by default).
   * <code>.tensorflow.RewriterConfig.Toggle scoped_allocator_optimization = 15;</code>
   */
  int getScopedAllocatorOptimizationValue();

  /**
   * Try to allocate some independent Op outputs contiguously in order to
   * merge or eliminate downstream Ops (off by default).
   * <code>.tensorflow.RewriterConfig.Toggle scoped_allocator_optimization = 15;</code>
   */
  org.tensorflow.framework.RewriterConfig.Toggle getScopedAllocatorOptimization();

  /**
   * Force small ops onto the CPU (default is OFF).
   * <code>.tensorflow.RewriterConfig.Toggle pin_to_host_optimization = 18;</code>
   */
  int getPinToHostOptimizationValue();

  /**
   * Force small ops onto the CPU (default is OFF).
   * <code>.tensorflow.RewriterConfig.Toggle pin_to_host_optimization = 18;</code>
   */
  org.tensorflow.framework.RewriterConfig.Toggle getPinToHostOptimization();

  /**
   * Enable the swap of kernel implementations based on the device placement
   * (default is ON).
   * <code>.tensorflow.RewriterConfig.Toggle implementation_selector = 22;</code>
   */
  int getImplementationSelectorValue();

  /**
   * Enable the swap of kernel implementations based on the device placement
   * (default is ON).
   * <code>.tensorflow.RewriterConfig.Toggle implementation_selector = 22;</code>
   */
  org.tensorflow.framework.RewriterConfig.Toggle getImplementationSelector();

  /**
   * Optimize data types (default is OFF).
   * e.g., This will try to use float16 on GPU which is faster.
   * Note that this can change the numerical stability of the graph and may
   * require the use of loss scaling to maintain model convergence.
   * <code>.tensorflow.RewriterConfig.Toggle auto_mixed_precision = 23;</code>
   */
  int getAutoMixedPrecisionValue();

  /**
   * Optimize data types (default is OFF).
   * e.g., This will try to use float16 on GPU which is faster.
   * Note that this can change the numerical stability of the graph and may
   * require the use of loss scaling to maintain model convergence.
   * <code>.tensorflow.RewriterConfig.Toggle auto_mixed_precision = 23;</code>
   */
  org.tensorflow.framework.RewriterConfig.Toggle getAutoMixedPrecision();

  /**
   * Disable the entire meta optimizer (off by default).
   * <code>bool disable_meta_optimizer = 19;</code>
   */
  boolean getDisableMetaOptimizer();
  /**
   * Controls how many times we run the optimizers in meta optimizer (default
   * is once).
   * <code>.tensorflow.RewriterConfig.NumIterationsType meta_optimizer_iterations = 12;</code>
   */
  int getMetaOptimizerIterationsValue();

  /**
   * Controls how many times we run the optimizers in meta optimizer (default
   * is once).
   * <code>.tensorflow.RewriterConfig.NumIterationsType meta_optimizer_iterations = 12;</code>
   */
  org.tensorflow.framework.RewriterConfig.NumIterationsType getMetaOptimizerIterations();

  /**
   * The minimum number of nodes in a graph to optimize. For smaller graphs,
   * optimization is skipped.
   * 0 means the system picks an appropriate number.
   * &lt; 0 means do not skip optimization.
   * <code>int32 min_graph_nodes = 17;</code>
   */
  int getMinGraphNodes();

  /**
   * Configures memory optimization passes through the meta-optimizer. Has no
   * effect on manually requested memory optimization passes in the optimizers
   * field.
   * <code>.tensorflow.RewriterConfig.MemOptType memory_optimization = 4;</code>
   */
  int getMemoryOptimizationValue();

  /**
   * Configures memory optimization passes through the meta-optimizer. Has no
   * effect on manually requested memory optimization passes in the optimizers
   * field.
   * <code>.tensorflow.RewriterConfig.MemOptType memory_optimization = 4;</code>
   */
  org.tensorflow.framework.RewriterConfig.MemOptType getMemoryOptimization();

  /**
   * A node name scope for node names which are valid outputs of recomputations.
   * Inputs to nodes that match this scope may be recomputed (subject either to
   * manual annotation of those input nodes or to manual annotation and
   * heuristics depending on memory_optimization), but the nodes themselves will
   * not be recomputed. This matches any sub-scopes as well, meaning the scope
   * can appear not just as a top-level scope. For example, if the value is
   * "gradients/", the default, it will match node names "gradients/foo" and
   * "foo/gradients/bar", but not "foo_gradients/".
   * <code>string memory_optimizer_target_node_name_scope = 6;</code>
   */
  java.lang.String getMemoryOptimizerTargetNodeNameScope();

  /**
   * A node name scope for node names which are valid outputs of recomputations.
   * Inputs to nodes that match this scope may be recomputed (subject either to
   * manual annotation of those input nodes or to manual annotation and
   * heuristics depending on memory_optimization), but the nodes themselves will
   * not be recomputed. This matches any sub-scopes as well, meaning the scope
   * can appear not just as a top-level scope. For example, if the value is
   * "gradients/", the default, it will match node names "gradients/foo" and
   * "foo/gradients/bar", but not "foo_gradients/".
   * <code>string memory_optimizer_target_node_name_scope = 6;</code>
   */
  com.google.protobuf.ByteString getMemoryOptimizerTargetNodeNameScopeBytes();
  /**
   * Maximum number of milliseconds to spend optimizing a single graph before
   * timing out. If equal to 0 the system picks a default (currently 5 minutes).
   * If less than 0 the optimizer will never time out.
   * <code>int64 meta_optimizer_timeout_ms = 20;</code>
   */
  long getMetaOptimizerTimeoutMs();

  /**
   * Configures AutoParallel optimization passes either through the
   * meta-optimizer or when manually specified through the optimizers field.
   * <code>.tensorflow.AutoParallelOptions auto_parallel = 5;</code>
   */
  boolean hasAutoParallel();

  /**
   * Configures AutoParallel optimization passes either through the
   * meta-optimizer or when manually specified through the optimizers field.
   * <code>.tensorflow.AutoParallelOptions auto_parallel = 5;</code>
   */
  org.tensorflow.framework.AutoParallelOptions getAutoParallel();

  /**
   * Configures AutoParallel optimization passes either through the
   * meta-optimizer or when manually specified through the optimizers field.
   * <code>.tensorflow.AutoParallelOptions auto_parallel = 5;</code>
   */
  org.tensorflow.framework.AutoParallelOptionsOrBuilder getAutoParallelOrBuilder();

  /**
   * If true, any optimization pass failing will cause the MetaOptimizer to
   * stop with an error. By default - or when set to false, failing passes are
   * skipped silently.
   * <code>bool fail_on_optimizer_errors = 21;</code>
   */
  boolean getFailOnOptimizerErrors();

  /** <code>.tensorflow.ScopedAllocatorOptions scoped_allocator_opts = 16;</code> */
  boolean hasScopedAllocatorOpts();

  /** <code>.tensorflow.ScopedAllocatorOptions scoped_allocator_opts = 16;</code> */
  org.tensorflow.framework.ScopedAllocatorOptions getScopedAllocatorOpts();

  /** <code>.tensorflow.ScopedAllocatorOptions scoped_allocator_opts = 16;</code> */
  org.tensorflow.framework.ScopedAllocatorOptionsOrBuilder getScopedAllocatorOptsOrBuilder();
  /**
   * If non-empty, will use this as an alternative way to specify a list of
   * optimizations to turn on and the order of the optimizations (replacing the
   * meta-optimizer).
   * Of the RewriterConfig options, only the AutoParallel configuration options
   * (the auto_parallel field) apply to manually requested optimization passes
   * ("autoparallel"). Memory optimization passes ("memory") invoked here are
   * not configurable (in contrast to memory optimization passes through the
   * meta-optimizer) and act only on manual op annotations.
   * Custom optimizers (see custom_optimizers) that are not part of this
   * schedule will be run after - in the order that they were specified.
   * <code>repeated string optimizers = 100;</code>
   */
  java.util.List<java.lang.String> getOptimizersList();

  /**
   * If non-empty, will use this as an alternative way to specify a list of
   * optimizations to turn on and the order of the optimizations (replacing the
   * meta-optimizer).
   * Of the RewriterConfig options, only the AutoParallel configuration options
   * (the auto_parallel field) apply to manually requested optimization passes
   * ("autoparallel"). Memory optimization passes ("memory") invoked here are
   * not configurable (in contrast to memory optimization passes through the
   * meta-optimizer) and act only on manual op annotations.
   * Custom optimizers (see custom_optimizers) that are not part of this
   * schedule will be run after - in the order that they were specified.
   * <code>repeated string optimizers = 100;</code>
   */
  int getOptimizersCount();

  /**
   * If non-empty, will use this as an alternative way to specify a list of
   * optimizations to turn on and the order of the optimizations (replacing the
   * meta-optimizer).
   * Of the RewriterConfig options, only the AutoParallel configuration options
   * (the auto_parallel field) apply to manually requested optimization passes
   * ("autoparallel"). Memory optimization passes ("memory") invoked here are
   * not configurable (in contrast to memory optimization passes through the
   * meta-optimizer) and act only on manual op annotations.
   * Custom optimizers (see custom_optimizers) that are not part of this
   * schedule will be run after - in the order that they were specified.
   * <code>repeated string optimizers = 100;</code>
   */
  java.lang.String getOptimizers(int index);

  /**
   * If non-empty, will use this as an alternative way to specify a list of
   * optimizations to turn on and the order of the optimizations (replacing the
   * meta-optimizer).
   * Of the RewriterConfig options, only the AutoParallel configuration options
   * (the auto_parallel field) apply to manually requested optimization passes
   * ("autoparallel"). Memory optimization passes ("memory") invoked here are
   * not configurable (in contrast to memory optimization passes through the
   * meta-optimizer) and act only on manual op annotations.
   * Custom optimizers (see custom_optimizers) that are not part of this
   * schedule will be run after - in the order that they were specified.
   * <code>repeated string optimizers = 100;</code>
   */
  com.google.protobuf.ByteString getOptimizersBytes(int index);

  /**
   * list of CustomGraphOptimizers to apply.
   * <code>repeated .tensorflow.RewriterConfig.CustomGraphOptimizer custom_optimizers = 200;</code>
   */
  java.util.List<org.tensorflow.framework.RewriterConfig.CustomGraphOptimizer> getCustomOptimizersList();

  /**
   * list of CustomGraphOptimizers to apply.
   * <code>repeated .tensorflow.RewriterConfig.CustomGraphOptimizer custom_optimizers = 200;</code>
   */
  org.tensorflow.framework.RewriterConfig.CustomGraphOptimizer getCustomOptimizers(int index);

  /**
   * list of CustomGraphOptimizers to apply.
   * <code>repeated .tensorflow.RewriterConfig.CustomGraphOptimizer custom_optimizers = 200;</code>
   */
  int getCustomOptimizersCount();

  /**
   * list of CustomGraphOptimizers to apply.
   * <code>repeated .tensorflow.RewriterConfig.CustomGraphOptimizer custom_optimizers = 200;</code>
   */
  java.util.List<? extends org.tensorflow.framework.RewriterConfig.CustomGraphOptimizerOrBuilder>
      getCustomOptimizersOrBuilderList();

  /**
   * list of CustomGraphOptimizers to apply.
   * <code>repeated .tensorflow.RewriterConfig.CustomGraphOptimizer custom_optimizers = 200;</code>
   */
  org.tensorflow.framework.RewriterConfig.CustomGraphOptimizerOrBuilder getCustomOptimizersOrBuilder(
      int index);
  /**
   * VerifierConfig specifying the verifiers to be run after every optimizer.
   * <code>.tensorflow.VerifierConfig inter_optimizer_verifier_config = 300;</code>
   */
  boolean hasInterOptimizerVerifierConfig();

  /**
   * VerifierConfig specifying the verifiers to be run after every optimizer.
   * <code>.tensorflow.VerifierConfig inter_optimizer_verifier_config = 300;</code>
   */
  org.tensorflow.framework.VerifierConfig getInterOptimizerVerifierConfig();

  /**
   * VerifierConfig specifying the verifiers to be run after every optimizer.
   * <code>.tensorflow.VerifierConfig inter_optimizer_verifier_config = 300;</code>
   */
  org.tensorflow.framework.VerifierConfigOrBuilder getInterOptimizerVerifierConfigOrBuilder();

  /**
   * VerifierConfig specifying the verifiers to be run at the end, after all
   * optimizers have run.
   * <code>.tensorflow.VerifierConfig post_optimization_verifier_config = 301;</code>
   */
  boolean hasPostOptimizationVerifierConfig();

  /**
   * VerifierConfig specifying the verifiers to be run at the end, after all
   * optimizers have run.
   * <code>.tensorflow.VerifierConfig post_optimization_verifier_config = 301;</code>
   */
  org.tensorflow.framework.VerifierConfig getPostOptimizationVerifierConfig();

  /**
   * VerifierConfig specifying the verifiers to be run at the end, after all
   * optimizers have run.
   * <code>.tensorflow.VerifierConfig post_optimization_verifier_config = 301;</code>
   */
  org.tensorflow.framework.VerifierConfigOrBuilder getPostOptimizationVerifierConfigOrBuilder();
}
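For context, here is a minimal usage sketch (not part of the generated file above). It assumes the standard protobuf-generated builder for RewriterConfig, plus the GraphOptions and ConfigProto messages generated from tensorflow/core/protobuf/config.proto in the same org.tensorflow.framework artifact; the setGraphOptions and setRewriteOptions setters below are the usual protobuf Java conventions for those fields and are not defined in this interface, so verify them against your artifact version.

// Usage sketch under the assumptions stated above; not part of the generated source.
import org.tensorflow.framework.ConfigProto;
import org.tensorflow.framework.GraphOptions;
import org.tensorflow.framework.RewriterConfig;

public final class RewriterConfigExample {
  public static void main(String[] args) {
    // Build a Grappler configuration: turn constant folding off, enable
    // auto mixed precision, and never skip optimization on small graphs.
    RewriterConfig rewriter =
        RewriterConfig.newBuilder()
            .setConstantFolding(RewriterConfig.Toggle.OFF)
            .setAutoMixedPrecision(RewriterConfig.Toggle.ON)
            .setMinGraphNodes(-1) // < 0 means: do not skip optimization
            .build();

    // Reading the message back goes through this RewriterConfigOrBuilder interface.
    org.tensorflow.framework.RewriterConfigOrBuilder view = rewriter;
    System.out.println("constant_folding = " + view.getConstantFolding());
    System.out.println("auto_mixed_precision = " + view.getAutoMixedPrecision());
    System.out.println("min_graph_nodes = " + view.getMinGraphNodes());

    // Embed the rewriter options into a session ConfigProto (assumed setters for the
    // graph_options / rewrite_options fields from config.proto).
    ConfigProto config =
        ConfigProto.newBuilder()
            .setGraphOptions(GraphOptions.newBuilder().setRewriteOptions(rewriter))
            .build();
    System.out.println(config);
  }
}

Note that every enum-typed field exposes both a getXxxValue() accessor (the raw wire value as an int) and a getXxx() accessor (the Toggle enum); this is the standard proto3 Java pattern for tolerating enum values unknown to the generated code.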



