org.tensorflow.framework.RunOptionsOrBuilder (from the proto artifact: Java API for TensorFlow protocol buffers)
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: tensorflow/core/protobuf/config.proto
package org.tensorflow.framework;
public interface RunOptionsOrBuilder extends
    // @@protoc_insertion_point(interface_extends:tensorflow.RunOptions)
    com.google.protobuf.MessageOrBuilder {

  /**
   * <code>.tensorflow.RunOptions.TraceLevel trace_level = 1;</code>
   */
  int getTraceLevelValue();

  /**
   * <code>.tensorflow.RunOptions.TraceLevel trace_level = 1;</code>
   */
  org.tensorflow.framework.RunOptions.TraceLevel getTraceLevel();

  /**
   * <pre>
   * Time to wait for operation to complete in milliseconds.
   * </pre>
   *
   * <code>int64 timeout_in_ms = 2;</code>
   */
  long getTimeoutInMs();

  /**
   * <pre>
   * The thread pool to use, if session_inter_op_thread_pool is configured.
   * To use the caller thread set this to -1 - this uses the caller thread
   * to execute Session::Run() and thus avoids a context switch. Using the
   * caller thread to execute Session::Run() should be done ONLY for simple
   * graphs, where the overhead of an additional context switch is
   * comparable with the overhead of Session::Run().
   * </pre>
   *
   * <code>int32 inter_op_thread_pool = 3;</code>
   */
  int getInterOpThreadPool();

  /**
   * <pre>
   * Whether the partition graph(s) executed by the executor(s) should be
   * outputted via RunMetadata.
   * </pre>
   *
   * <code>bool output_partition_graphs = 5;</code>
   */
  boolean getOutputPartitionGraphs();

  /**
   * <pre>
   * EXPERIMENTAL. Options used to initialize DebuggerState, if enabled.
   * </pre>
   *
   * <code>.tensorflow.DebugOptions debug_options = 6;</code>
   */
  boolean hasDebugOptions();

  /**
   * <pre>
   * EXPERIMENTAL. Options used to initialize DebuggerState, if enabled.
   * </pre>
   *
   * <code>.tensorflow.DebugOptions debug_options = 6;</code>
   */
  org.tensorflow.framework.DebugOptions getDebugOptions();

  /**
   * <pre>
   * EXPERIMENTAL. Options used to initialize DebuggerState, if enabled.
   * </pre>
   *
   * <code>.tensorflow.DebugOptions debug_options = 6;</code>
   */
  org.tensorflow.framework.DebugOptionsOrBuilder getDebugOptionsOrBuilder();

  /**
   * <pre>
   * When enabled, causes tensor allocation information to be included in
   * the error message when the Run() call fails because the allocator ran
   * out of memory (OOM).
   * Enabling this option can slow down the Run() call.
   * </pre>
   *
   * <code>bool report_tensor_allocations_upon_oom = 7;</code>
   */
  boolean getReportTensorAllocationsUponOom();

  /**
   * <code>.tensorflow.RunOptions.Experimental experimental = 8;</code>
   */
  boolean hasExperimental();

  /**
   * <code>.tensorflow.RunOptions.Experimental experimental = 8;</code>
   */
  org.tensorflow.framework.RunOptions.Experimental getExperimental();

  /**
   * <code>.tensorflow.RunOptions.Experimental experimental = 8;</code>
   */
  org.tensorflow.framework.RunOptions.ExperimentalOrBuilder getExperimentalOrBuilder();
}
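The interface above only declares read accessors. A minimal usage sketch (not part of the generated file) is shown below, assuming the standard protobuf-java builder API that protoc generates alongside this interface; the class name RunOptionsExample is illustrative only.

import org.tensorflow.framework.RunOptions;
import org.tensorflow.framework.RunOptionsOrBuilder;

public class RunOptionsExample {
  public static void main(String[] args) {
    // Build a RunOptions message; each setter corresponds to a field
    // declared in RunOptionsOrBuilder.
    RunOptions options = RunOptions.newBuilder()
        .setTraceLevel(RunOptions.TraceLevel.FULL_TRACE) // trace_level = 1
        .setTimeoutInMs(60_000L)                         // timeout_in_ms = 2
        .setInterOpThreadPool(-1)                        // inter_op_thread_pool = 3 (-1 = caller thread)
        .setOutputPartitionGraphs(true)                  // output_partition_graphs = 5
        .setReportTensorAllocationsUponOom(true)         // report_tensor_allocations_upon_oom = 7
        .build();

    // A built RunOptions (and its Builder) can be read through the
    // RunOptionsOrBuilder interface declared above.
    RunOptionsOrBuilder view = options;
    System.out.println("timeout_in_ms = " + view.getTimeoutInMs());
    System.out.println("has debug_options = " + view.hasDebugOptions());

    // Standard protobuf serialization to the wire format, e.g. for APIs
    // that accept RunOptions as serialized bytes.
    byte[] serialized = options.toByteArray();
    System.out.println("serialized size = " + serialized.length + " bytes");
  }
}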