All Downloads are FREE. Search and download functionalities are using the official Maven repository.

org.tensorflow.framework.RewriterConfigOrBuilder Maven / Gradle / Ivy

There is a newer version: 1.0.0-M2.1
Show newest version
// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: tensorflow/core/protobuf/rewriter_config.proto

package org.tensorflow.framework;

/**
 * Read-only accessor interface for {@code tensorflow.RewriterConfig}, the
 * Grappler graph-rewriting configuration message. Implemented by both the
 * immutable {@code RewriterConfig} message and its {@code Builder}.
 *
 * <p>Generated from {@code tensorflow/core/protobuf/rewriter_config.proto};
 * enum-typed fields expose both a typed getter and a raw wire-value getter.
 */
public interface RewriterConfigOrBuilder extends
    // @@protoc_insertion_point(interface_extends:tensorflow.RewriterConfig)
    org.nd4j.shade.protobuf.MessageOrBuilder {

  /**
   * <pre>
   * Optimize tensor layouts (default is ON)
   * e.g. This will try to use NCHW layout on GPU which is faster.
   * </pre>
   *
   * <code>.tensorflow.RewriterConfig.Toggle layout_optimizer = 1;</code>
   * @return The enum numeric value on the wire for layoutOptimizer.
   */
  int getLayoutOptimizerValue();

  /**
   * <pre>
   * Optimize tensor layouts (default is ON)
   * e.g. This will try to use NCHW layout on GPU which is faster.
   * </pre>
   *
   * <code>.tensorflow.RewriterConfig.Toggle layout_optimizer = 1;</code>
   * @return The layoutOptimizer.
   */
  org.tensorflow.framework.RewriterConfig.Toggle getLayoutOptimizer();

  /**
   * <pre>
   * Fold constants (default is ON)
   * Statically infer the value of tensors when possible, and materialize the
   * result using constants.
   * </pre>
   *
   * <code>.tensorflow.RewriterConfig.Toggle constant_folding = 3;</code>
   * @return The enum numeric value on the wire for constantFolding.
   */
  int getConstantFoldingValue();

  /**
   * <pre>
   * Fold constants (default is ON)
   * Statically infer the value of tensors when possible, and materialize the
   * result using constants.
   * </pre>
   *
   * <code>.tensorflow.RewriterConfig.Toggle constant_folding = 3;</code>
   * @return The constantFolding.
   */
  org.tensorflow.framework.RewriterConfig.Toggle getConstantFolding();

  /**
   * <pre>
   * Shape optimizations (default is ON)
   * Simplify computations made on shapes.
   * </pre>
   *
   * <code>.tensorflow.RewriterConfig.Toggle shape_optimization = 13;</code>
   * @return The enum numeric value on the wire for shapeOptimization.
   */
  int getShapeOptimizationValue();

  /**
   * <pre>
   * Shape optimizations (default is ON)
   * Simplify computations made on shapes.
   * </pre>
   *
   * <code>.tensorflow.RewriterConfig.Toggle shape_optimization = 13;</code>
   * @return The shapeOptimization.
   */
  org.tensorflow.framework.RewriterConfig.Toggle getShapeOptimization();

  /**
   * <pre>
   * Remapping (default is ON)
   * Remap subgraphs onto more efficient implementations.
   * </pre>
   *
   * <code>.tensorflow.RewriterConfig.Toggle remapping = 14;</code>
   * @return The enum numeric value on the wire for remapping.
   */
  int getRemappingValue();

  /**
   * <pre>
   * Remapping (default is ON)
   * Remap subgraphs onto more efficient implementations.
   * </pre>
   *
   * <code>.tensorflow.RewriterConfig.Toggle remapping = 14;</code>
   * @return The remapping.
   */
  org.tensorflow.framework.RewriterConfig.Toggle getRemapping();

  /**
   * <pre>
   * Arithmetic optimizations (default is ON)
   * e.g. Simplify arithmetic ops; merge ops with same value (like constants).
   * </pre>
   *
   * <code>.tensorflow.RewriterConfig.Toggle arithmetic_optimization = 7;</code>
   * @return The enum numeric value on the wire for arithmeticOptimization.
   */
  int getArithmeticOptimizationValue();

  /**
   * <pre>
   * Arithmetic optimizations (default is ON)
   * e.g. Simplify arithmetic ops; merge ops with same value (like constants).
   * </pre>
   *
   * <code>.tensorflow.RewriterConfig.Toggle arithmetic_optimization = 7;</code>
   * @return The arithmeticOptimization.
   */
  org.tensorflow.framework.RewriterConfig.Toggle getArithmeticOptimization();

  /**
   * <pre>
   * Control dependency optimizations (default is ON).
   * Remove redundant control dependencies, which may enable other optimization.
   * </pre>
   *
   * <code>.tensorflow.RewriterConfig.Toggle dependency_optimization = 8;</code>
   * @return The enum numeric value on the wire for dependencyOptimization.
   */
  int getDependencyOptimizationValue();

  /**
   * <pre>
   * Control dependency optimizations (default is ON).
   * Remove redundant control dependencies, which may enable other optimization.
   * </pre>
   *
   * <code>.tensorflow.RewriterConfig.Toggle dependency_optimization = 8;</code>
   * @return The dependencyOptimization.
   */
  org.tensorflow.framework.RewriterConfig.Toggle getDependencyOptimization();

  /**
   * <pre>
   * Loop optimizations (default is ON).
   * </pre>
   *
   * <code>.tensorflow.RewriterConfig.Toggle loop_optimization = 9;</code>
   * @return The enum numeric value on the wire for loopOptimization.
   */
  int getLoopOptimizationValue();

  /**
   * <pre>
   * Loop optimizations (default is ON).
   * </pre>
   *
   * <code>.tensorflow.RewriterConfig.Toggle loop_optimization = 9;</code>
   * @return The loopOptimization.
   */
  org.tensorflow.framework.RewriterConfig.Toggle getLoopOptimization();

  /**
   * <pre>
   * Function optimizations (default is ON).
   * </pre>
   *
   * <code>.tensorflow.RewriterConfig.Toggle function_optimization = 10;</code>
   * @return The enum numeric value on the wire for functionOptimization.
   */
  int getFunctionOptimizationValue();

  /**
   * <pre>
   * Function optimizations (default is ON).
   * </pre>
   *
   * <code>.tensorflow.RewriterConfig.Toggle function_optimization = 10;</code>
   * @return The functionOptimization.
   */
  org.tensorflow.framework.RewriterConfig.Toggle getFunctionOptimization();

  /**
   * <pre>
   * Strips debug-related nodes from the graph (off by default).
   * </pre>
   *
   * <code>.tensorflow.RewriterConfig.Toggle debug_stripper = 11;</code>
   * @return The enum numeric value on the wire for debugStripper.
   */
  int getDebugStripperValue();

  /**
   * <pre>
   * Strips debug-related nodes from the graph (off by default).
   * </pre>
   *
   * <code>.tensorflow.RewriterConfig.Toggle debug_stripper = 11;</code>
   * @return The debugStripper.
   */
  org.tensorflow.framework.RewriterConfig.Toggle getDebugStripper();

  /**
   * <pre>
   * If true, don't remove unnecessary ops from the graph
   * </pre>
   *
   * <code>bool disable_model_pruning = 2;</code>
   * @return The disableModelPruning.
   */
  boolean getDisableModelPruning();

  /**
   * <pre>
   * Try to allocate some independent Op outputs contiguously in order to
   * merge or eliminate downstream Ops (off by default).
   * </pre>
   *
   * <code>.tensorflow.RewriterConfig.Toggle scoped_allocator_optimization = 15;</code>
   * @return The enum numeric value on the wire for scopedAllocatorOptimization.
   */
  int getScopedAllocatorOptimizationValue();

  /**
   * <pre>
   * Try to allocate some independent Op outputs contiguously in order to
   * merge or eliminate downstream Ops (off by default).
   * </pre>
   *
   * <code>.tensorflow.RewriterConfig.Toggle scoped_allocator_optimization = 15;</code>
   * @return The scopedAllocatorOptimization.
   */
  org.tensorflow.framework.RewriterConfig.Toggle getScopedAllocatorOptimization();

  /**
   * <pre>
   * Controls how many times we run the optimizers in meta optimizer (default
   * is once).
   * </pre>
   *
   * <code>.tensorflow.RewriterConfig.NumIterationsType meta_optimizer_iterations = 12;</code>
   * @return The enum numeric value on the wire for metaOptimizerIterations.
   */
  int getMetaOptimizerIterationsValue();

  /**
   * <pre>
   * Controls how many times we run the optimizers in meta optimizer (default
   * is once).
   * </pre>
   *
   * <code>.tensorflow.RewriterConfig.NumIterationsType meta_optimizer_iterations = 12;</code>
   * @return The metaOptimizerIterations.
   */
  org.tensorflow.framework.RewriterConfig.NumIterationsType getMetaOptimizerIterations();

  /**
   * <pre>
   * Configures memory optimization passes through the meta-optimizer. Has no
   * effect on manually requested memory optimization passes in the optimizers
   * field.
   * </pre>
   *
   * <code>.tensorflow.RewriterConfig.MemOptType memory_optimization = 4;</code>
   * @return The enum numeric value on the wire for memoryOptimization.
   */
  int getMemoryOptimizationValue();

  /**
   * <pre>
   * Configures memory optimization passes through the meta-optimizer. Has no
   * effect on manually requested memory optimization passes in the optimizers
   * field.
   * </pre>
   *
   * <code>.tensorflow.RewriterConfig.MemOptType memory_optimization = 4;</code>
   * @return The memoryOptimization.
   */
  org.tensorflow.framework.RewriterConfig.MemOptType getMemoryOptimization();

  /**
   * <pre>
   * A node name scope for node names which are valid outputs of recompuations.
   * Inputs to nodes that match this scope may be recomputed (subject either to
   * manual annotation of those input nodes or to manual annotation and
   * heuristics depending on memory_optimization), but the nodes themselves will
   * not be recomputed. This matches any sub-scopes as well, meaning the scope
   * can appear not just as a top-level scope. For example, if the value is
   * "gradients/", the default, it will match node name "gradients/foo",
   * "foo/gradients/bar", but not "foo_gradients/"
   * </pre>
   *
   * <code>string memory_optimizer_target_node_name_scope = 6;</code>
   * @return The memoryOptimizerTargetNodeNameScope.
   */
  java.lang.String getMemoryOptimizerTargetNodeNameScope();

  /**
   * <pre>
   * A node name scope for node names which are valid outputs of recompuations.
   * Inputs to nodes that match this scope may be recomputed (subject either to
   * manual annotation of those input nodes or to manual annotation and
   * heuristics depending on memory_optimization), but the nodes themselves will
   * not be recomputed. This matches any sub-scopes as well, meaning the scope
   * can appear not just as a top-level scope. For example, if the value is
   * "gradients/", the default, it will match node name "gradients/foo",
   * "foo/gradients/bar", but not "foo_gradients/"
   * </pre>
   *
   * <code>string memory_optimizer_target_node_name_scope = 6;</code>
   * @return The bytes for memoryOptimizerTargetNodeNameScope.
   */
  org.nd4j.shade.protobuf.ByteString getMemoryOptimizerTargetNodeNameScopeBytes();

  /**
   * <pre>
   * Configures AutoParallel optimization passes either through the
   * meta-optimizer or when manually specified through the optimizers field.
   * </pre>
   *
   * <code>.tensorflow.AutoParallelOptions auto_parallel = 5;</code>
   * @return Whether the autoParallel field is set.
   */
  boolean hasAutoParallel();

  /**
   * <pre>
   * Configures AutoParallel optimization passes either through the
   * meta-optimizer or when manually specified through the optimizers field.
   * </pre>
   *
   * <code>.tensorflow.AutoParallelOptions auto_parallel = 5;</code>
   * @return The autoParallel.
   */
  org.tensorflow.framework.AutoParallelOptions getAutoParallel();

  /**
   * <pre>
   * Configures AutoParallel optimization passes either through the
   * meta-optimizer or when manually specified through the optimizers field.
   * </pre>
   *
   * <code>.tensorflow.AutoParallelOptions auto_parallel = 5;</code>
   */
  org.tensorflow.framework.AutoParallelOptionsOrBuilder getAutoParallelOrBuilder();

  /**
   * <code>.tensorflow.ScopedAllocatorOptions scoped_allocator_opts = 16;</code>
   * @return Whether the scopedAllocatorOpts field is set.
   */
  boolean hasScopedAllocatorOpts();

  /**
   * <code>.tensorflow.ScopedAllocatorOptions scoped_allocator_opts = 16;</code>
   * @return The scopedAllocatorOpts.
   */
  org.tensorflow.framework.ScopedAllocatorOptions getScopedAllocatorOpts();

  /**
   * <code>.tensorflow.ScopedAllocatorOptions scoped_allocator_opts = 16;</code>
   */
  org.tensorflow.framework.ScopedAllocatorOptionsOrBuilder getScopedAllocatorOptsOrBuilder();

  /**
   * <pre>
   * If non-empty, will use this as an alternative way to specify a list of
   * optimizations to turn on and the order of the optimizations (replacing the
   * meta-optimizer).
   * Of the RewriterConfig options, only the AutoParallel configuration options
   * (the auto_parallel field) apply to manually requested optimization passes
   * ("autoparallel"). Memory optimization passes ("memory") invoked here are
   * not configurable (in contrast to memory optimization passes through the
   * meta-optimizer) and act only on manual op annotations.
   * Custom registered optimizers will be run after the base optimizers, in
   * the order that they are specified.
   * </pre>
   *
   * <code>repeated string optimizers = 100;</code>
   * @return A list containing the optimizers.
   */
  java.util.List<java.lang.String> getOptimizersList();

  /**
   * <pre>
   * If non-empty, will use this as an alternative way to specify a list of
   * optimizations to turn on and the order of the optimizations (replacing the
   * meta-optimizer).
   * Of the RewriterConfig options, only the AutoParallel configuration options
   * (the auto_parallel field) apply to manually requested optimization passes
   * ("autoparallel"). Memory optimization passes ("memory") invoked here are
   * not configurable (in contrast to memory optimization passes through the
   * meta-optimizer) and act only on manual op annotations.
   * Custom registered optimizers will be run after the base optimizers, in
   * the order that they are specified.
   * </pre>
   *
   * <code>repeated string optimizers = 100;</code>
   * @return The count of optimizers.
   */
  int getOptimizersCount();

  /**
   * <pre>
   * If non-empty, will use this as an alternative way to specify a list of
   * optimizations to turn on and the order of the optimizations (replacing the
   * meta-optimizer).
   * Of the RewriterConfig options, only the AutoParallel configuration options
   * (the auto_parallel field) apply to manually requested optimization passes
   * ("autoparallel"). Memory optimization passes ("memory") invoked here are
   * not configurable (in contrast to memory optimization passes through the
   * meta-optimizer) and act only on manual op annotations.
   * Custom registered optimizers will be run after the base optimizers, in
   * the order that they are specified.
   * </pre>
   *
   * <code>repeated string optimizers = 100;</code>
   * @param index The index of the element to return.
   * @return The optimizers at the given index.
   */
  java.lang.String getOptimizers(int index);

  /**
   * <pre>
   * If non-empty, will use this as an alternative way to specify a list of
   * optimizations to turn on and the order of the optimizations (replacing the
   * meta-optimizer).
   * Of the RewriterConfig options, only the AutoParallel configuration options
   * (the auto_parallel field) apply to manually requested optimization passes
   * ("autoparallel"). Memory optimization passes ("memory") invoked here are
   * not configurable (in contrast to memory optimization passes through the
   * meta-optimizer) and act only on manual op annotations.
   * Custom registered optimizers will be run after the base optimizers, in
   * the order that they are specified.
   * </pre>
   *
   * <code>repeated string optimizers = 100;</code>
   * @param index The index of the value to return.
   * @return The bytes of the optimizers at the given index.
   */
  org.nd4j.shade.protobuf.ByteString getOptimizersBytes(int index);

  /**
   * <pre>
   * list of CustomGraphOptimizers to apply.
   * </pre>
   *
   * <code>repeated .tensorflow.RewriterConfig.CustomGraphOptimizer custom_optimizers = 200;</code>
   */
  java.util.List<org.tensorflow.framework.RewriterConfig.CustomGraphOptimizer>
      getCustomOptimizersList();

  /**
   * <pre>
   * list of CustomGraphOptimizers to apply.
   * </pre>
   *
   * <code>repeated .tensorflow.RewriterConfig.CustomGraphOptimizer custom_optimizers = 200;</code>
   */
  org.tensorflow.framework.RewriterConfig.CustomGraphOptimizer getCustomOptimizers(int index);

  /**
   * <pre>
   * list of CustomGraphOptimizers to apply.
   * </pre>
   *
   * <code>repeated .tensorflow.RewriterConfig.CustomGraphOptimizer custom_optimizers = 200;</code>
   */
  int getCustomOptimizersCount();

  /**
   * <pre>
   * list of CustomGraphOptimizers to apply.
   * </pre>
   *
   * <code>repeated .tensorflow.RewriterConfig.CustomGraphOptimizer custom_optimizers = 200;</code>
   */
  java.util.List<? extends org.tensorflow.framework.RewriterConfig.CustomGraphOptimizerOrBuilder>
      getCustomOptimizersOrBuilderList();

  /**
   * <pre>
   * list of CustomGraphOptimizers to apply.
   * </pre>
   *
   * <code>repeated .tensorflow.RewriterConfig.CustomGraphOptimizer custom_optimizers = 200;</code>
   */
  org.tensorflow.framework.RewriterConfig.CustomGraphOptimizerOrBuilder getCustomOptimizersOrBuilder(
      int index);
}




© 2015 - 2024 Weber Informatics LLC | Privacy Policy