// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE

package org.bytedeco.tensorflowlite;

import java.nio.*;
import org.bytedeco.javacpp.*;
import org.bytedeco.javacpp.annotation.*;

import static org.bytedeco.tensorflowlite.global.tensorflowlite.*;

@Namespace("tflite::impl") @NoOffset @Properties(inherit = org.bytedeco.tensorflowlite.presets.tensorflowlite.class)
public class Interpreter extends Pointer {
    static { Loader.load(); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public Interpreter(Pointer p) { super(p); }

  // Instantiate an interpreter. All errors associated with reading and
  // processing this model will be forwarded to the error_reporter object.
  //
  // Note, if error_reporter is nullptr, then a default StderrReporter is
  // used. Ownership of 'error_reporter' remains with the caller.
  // WARNING: Use of this constructor outside of an InterpreterBuilder is not
  // recommended.
  public Interpreter(ErrorReporter error_reporter/*=tflite::DefaultErrorReporter()*/) { super((Pointer)null); allocate(error_reporter); }
  @UniquePtr @Name("std::make_unique") private native void allocate(ErrorReporter error_reporter/*=tflite::DefaultErrorReporter()*/);
  public Interpreter() { super((Pointer)null); allocate(); }
  @UniquePtr @Name("std::make_unique") private native void allocate();

  // Interpreters are not copyable as they have non-trivial memory semantics.
  
  

  // Functions to build interpreter
// #ifndef DOXYGEN_SKIP
  /** Provide a list of tensor indexes that are inputs to the model.
   *  Each index is bounds-checked, and this modifies the consistent_ flag of
   *  the interpreter. */
  public native @Cast("TfLiteStatus") int SetInputs(@StdVector IntPointer inputs);
  public native @Cast("TfLiteStatus") int SetInputs(@StdVector IntBuffer inputs);
  public native @Cast("TfLiteStatus") int SetInputs(@StdVector int[] inputs);

  /** Provide a list of tensor indexes that are outputs of the model.
   *  Each index is bounds-checked, and this modifies the consistent_ flag of
   *  the interpreter. */
  public native @Cast("TfLiteStatus") int SetOutputs(@StdVector IntPointer outputs);
  public native @Cast("TfLiteStatus") int SetOutputs(@StdVector IntBuffer outputs);
  public native @Cast("TfLiteStatus") int SetOutputs(@StdVector int[] outputs);

  /** Provide a list of tensor indexes that are variable tensors.
   *  Each index is bounds-checked, and this modifies the consistent_ flag of
   *  the interpreter. */
  public native @Cast("TfLiteStatus") int SetVariables(@StdVector IntPointer variables);
  public native @Cast("TfLiteStatus") int SetVariables(@StdVector IntBuffer variables);
  public native @Cast("TfLiteStatus") int SetVariables(@StdVector int[] variables);

  /** Adds a node with the given parameters and returns the index of the new
   *  node in {@code node_index} (optionally). Interpreter will take ownership of
   *  {@code builtin_data} and destroy it with {@code free}. Ownership of 'init_data'
   *  remains with the caller. */
  public native @Cast("TfLiteStatus") int AddNodeWithParameters(@StdVector IntPointer inputs,
                                       @StdVector IntPointer outputs,
                                       @Cast("const char*") BytePointer init_data,
                                       @Cast("size_t") long init_data_size, Pointer builtin_data,
                                       @Const TfLiteRegistration registration,
                                       IntPointer node_index/*=nullptr*/);
  public native @Cast("TfLiteStatus") int AddNodeWithParameters(@StdVector IntPointer inputs,
                                       @StdVector IntPointer outputs,
                                       @Cast("const char*") BytePointer init_data,
                                       @Cast("size_t") long init_data_size, Pointer builtin_data,
                                       @Const TfLiteRegistration registration);
  public native @Cast("TfLiteStatus") int AddNodeWithParameters(@StdVector IntBuffer inputs,
                                       @StdVector IntBuffer outputs,
                                       String init_data,
                                       @Cast("size_t") long init_data_size, Pointer builtin_data,
                                       @Const TfLiteRegistration registration,
                                       IntBuffer node_index/*=nullptr*/);
  public native @Cast("TfLiteStatus") int AddNodeWithParameters(@StdVector IntBuffer inputs,
                                       @StdVector IntBuffer outputs,
                                       String init_data,
                                       @Cast("size_t") long init_data_size, Pointer builtin_data,
                                       @Const TfLiteRegistration registration);
  public native @Cast("TfLiteStatus") int AddNodeWithParameters(@StdVector int[] inputs,
                                       @StdVector int[] outputs,
                                       @Cast("const char*") BytePointer init_data,
                                       @Cast("size_t") long init_data_size, Pointer builtin_data,
                                       @Const TfLiteRegistration registration,
                                       int[] node_index/*=nullptr*/);
  public native @Cast("TfLiteStatus") int AddNodeWithParameters(@StdVector int[] inputs,
                                       @StdVector int[] outputs,
                                       @Cast("const char*") BytePointer init_data,
                                       @Cast("size_t") long init_data_size, Pointer builtin_data,
                                       @Const TfLiteRegistration registration);
  public native @Cast("TfLiteStatus") int AddNodeWithParameters(@StdVector IntPointer inputs,
                                       @StdVector IntPointer outputs,
                                       String init_data,
                                       @Cast("size_t") long init_data_size, Pointer builtin_data,
                                       @Const TfLiteRegistration registration,
                                       IntPointer node_index/*=nullptr*/);
  public native @Cast("TfLiteStatus") int AddNodeWithParameters(@StdVector IntPointer inputs,
                                       @StdVector IntPointer outputs,
                                       String init_data,
                                       @Cast("size_t") long init_data_size, Pointer builtin_data,
                                       @Const TfLiteRegistration registration);
  public native @Cast("TfLiteStatus") int AddNodeWithParameters(@StdVector IntBuffer inputs,
                                       @StdVector IntBuffer outputs,
                                       @Cast("const char*") BytePointer init_data,
                                       @Cast("size_t") long init_data_size, Pointer builtin_data,
                                       @Const TfLiteRegistration registration,
                                       IntBuffer node_index/*=nullptr*/);
  public native @Cast("TfLiteStatus") int AddNodeWithParameters(@StdVector IntBuffer inputs,
                                       @StdVector IntBuffer outputs,
                                       @Cast("const char*") BytePointer init_data,
                                       @Cast("size_t") long init_data_size, Pointer builtin_data,
                                       @Const TfLiteRegistration registration);
  public native @Cast("TfLiteStatus") int AddNodeWithParameters(@StdVector int[] inputs,
                                       @StdVector int[] outputs,
                                       String init_data,
                                       @Cast("size_t") long init_data_size, Pointer builtin_data,
                                       @Const TfLiteRegistration registration,
                                       int[] node_index/*=nullptr*/);
  public native @Cast("TfLiteStatus") int AddNodeWithParameters(@StdVector int[] inputs,
                                       @StdVector int[] outputs,
                                       String init_data,
                                       @Cast("size_t") long init_data_size, Pointer builtin_data,
                                       @Const TfLiteRegistration registration);

  /** Adds {@code tensors_to_add} tensors, preserving pre-existing Tensor entries.
   *  The value pointed to by {@code first_new_tensor_index} will be set to the
   *  index of the first new tensor if {@code first_new_tensor_index} is non-null. */
  public native @Cast("TfLiteStatus") int AddTensors(int tensors_to_add,
                            IntPointer first_new_tensor_index/*=nullptr*/);
  public native @Cast("TfLiteStatus") int AddTensors(int tensors_to_add);
  public native @Cast("TfLiteStatus") int AddTensors(int tensors_to_add,
                            IntBuffer first_new_tensor_index/*=nullptr*/);
  public native @Cast("TfLiteStatus") int AddTensors(int tensors_to_add,
                            int[] first_new_tensor_index/*=nullptr*/);
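
  // For example (sketch): reserve two new tensors and recover the index of
  // the first one:
  //
  //   IntPointer first = new IntPointer(1L);
  //   interpreter.AddTensors(2, first);
  //   int firstIndex = first.get();  // new tensors are firstIndex and firstIndex + 1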

  /** Set parameters for the tensor at {@code tensor_index}, backing it with a
   *  read-only external buffer of size {@code bytes}. The lifetime of
   *  {@code buffer} must be at least as long as that of the Interpreter. */
  public native @Cast("TfLiteStatus") int SetTensorParametersReadOnly(
        int tensor_index, @Cast("TfLiteType") int type, @Cast("const char*") BytePointer name,
        @StdVector IntPointer dims, @ByVal TfLiteQuantization quantization,
        @Cast("const char*") BytePointer buffer, @Cast("size_t") long bytes, @Const Allocation allocation/*=nullptr*/);
  public native @Cast("TfLiteStatus") int SetTensorParametersReadOnly(
        int tensor_index, @Cast("TfLiteType") int type, @Cast("const char*") BytePointer name,
        @StdVector IntPointer dims, @ByVal TfLiteQuantization quantization,
        @Cast("const char*") BytePointer buffer, @Cast("size_t") long bytes);
  public native @Cast("TfLiteStatus") int SetTensorParametersReadOnly(
        int tensor_index, @Cast("TfLiteType") int type, String name,
        @StdVector IntBuffer dims, @ByVal TfLiteQuantization quantization,
        String buffer, @Cast("size_t") long bytes, @Const Allocation allocation/*=nullptr*/);
  public native @Cast("TfLiteStatus") int SetTensorParametersReadOnly(
        int tensor_index, @Cast("TfLiteType") int type, String name,
        @StdVector IntBuffer dims, @ByVal TfLiteQuantization quantization,
        String buffer, @Cast("size_t") long bytes);
  public native @Cast("TfLiteStatus") int SetTensorParametersReadOnly(
        int tensor_index, @Cast("TfLiteType") int type, @Cast("const char*") BytePointer name,
        @StdVector int[] dims, @ByVal TfLiteQuantization quantization,
        @Cast("const char*") BytePointer buffer, @Cast("size_t") long bytes, @Const Allocation allocation/*=nullptr*/);
  public native @Cast("TfLiteStatus") int SetTensorParametersReadOnly(
        int tensor_index, @Cast("TfLiteType") int type, @Cast("const char*") BytePointer name,
        @StdVector int[] dims, @ByVal TfLiteQuantization quantization,
        @Cast("const char*") BytePointer buffer, @Cast("size_t") long bytes);
  public native @Cast("TfLiteStatus") int SetTensorParametersReadOnly(
        int tensor_index, @Cast("TfLiteType") int type, String name,
        @StdVector IntPointer dims, @ByVal TfLiteQuantization quantization,
        String buffer, @Cast("size_t") long bytes, @Const Allocation allocation/*=nullptr*/);
  public native @Cast("TfLiteStatus") int SetTensorParametersReadOnly(
        int tensor_index, @Cast("TfLiteType") int type, String name,
        @StdVector IntPointer dims, @ByVal TfLiteQuantization quantization,
        String buffer, @Cast("size_t") long bytes);
  public native @Cast("TfLiteStatus") int SetTensorParametersReadOnly(
        int tensor_index, @Cast("TfLiteType") int type, @Cast("const char*") BytePointer name,
        @StdVector IntBuffer dims, @ByVal TfLiteQuantization quantization,
        @Cast("const char*") BytePointer buffer, @Cast("size_t") long bytes, @Const Allocation allocation/*=nullptr*/);
  public native @Cast("TfLiteStatus") int SetTensorParametersReadOnly(
        int tensor_index, @Cast("TfLiteType") int type, @Cast("const char*") BytePointer name,
        @StdVector IntBuffer dims, @ByVal TfLiteQuantization quantization,
        @Cast("const char*") BytePointer buffer, @Cast("size_t") long bytes);
  public native @Cast("TfLiteStatus") int SetTensorParametersReadOnly(
        int tensor_index, @Cast("TfLiteType") int type, String name,
        @StdVector int[] dims, @ByVal TfLiteQuantization quantization,
        String buffer, @Cast("size_t") long bytes, @Const Allocation allocation/*=nullptr*/);
  public native @Cast("TfLiteStatus") int SetTensorParametersReadOnly(
        int tensor_index, @Cast("TfLiteType") int type, String name,
        @StdVector int[] dims, @ByVal TfLiteQuantization quantization,
        String buffer, @Cast("size_t") long bytes);

  /** Legacy. Deprecated in favor of above. */
  public native @Cast("TfLiteStatus") int SetTensorParametersReadOnly(
        int tensor_index, @Cast("TfLiteType") int type, @Cast("const char*") BytePointer name,
        @StdVector IntPointer dims, @ByVal TfLiteQuantizationParams quantization,
        @Cast("const char*") BytePointer buffer, @Cast("size_t") long bytes,
        @Const Allocation allocation/*=nullptr*/);
  public native @Cast("TfLiteStatus") int SetTensorParametersReadOnly(
        int tensor_index, @Cast("TfLiteType") int type, @Cast("const char*") BytePointer name,
        @StdVector IntPointer dims, @ByVal TfLiteQuantizationParams quantization,
        @Cast("const char*") BytePointer buffer, @Cast("size_t") long bytes);
  public native @Cast("TfLiteStatus") int SetTensorParametersReadOnly(
        int tensor_index, @Cast("TfLiteType") int type, String name,
        @StdVector IntBuffer dims, @ByVal TfLiteQuantizationParams quantization,
        String buffer, @Cast("size_t") long bytes,
        @Const Allocation allocation/*=nullptr*/);
  public native @Cast("TfLiteStatus") int SetTensorParametersReadOnly(
        int tensor_index, @Cast("TfLiteType") int type, String name,
        @StdVector IntBuffer dims, @ByVal TfLiteQuantizationParams quantization,
        String buffer, @Cast("size_t") long bytes);
  public native @Cast("TfLiteStatus") int SetTensorParametersReadOnly(
        int tensor_index, @Cast("TfLiteType") int type, @Cast("const char*") BytePointer name,
        @StdVector int[] dims, @ByVal TfLiteQuantizationParams quantization,
        @Cast("const char*") BytePointer buffer, @Cast("size_t") long bytes,
        @Const Allocation allocation/*=nullptr*/);
  public native @Cast("TfLiteStatus") int SetTensorParametersReadOnly(
        int tensor_index, @Cast("TfLiteType") int type, @Cast("const char*") BytePointer name,
        @StdVector int[] dims, @ByVal TfLiteQuantizationParams quantization,
        @Cast("const char*") BytePointer buffer, @Cast("size_t") long bytes);
  public native @Cast("TfLiteStatus") int SetTensorParametersReadOnly(
        int tensor_index, @Cast("TfLiteType") int type, String name,
        @StdVector IntPointer dims, @ByVal TfLiteQuantizationParams quantization,
        String buffer, @Cast("size_t") long bytes,
        @Const Allocation allocation/*=nullptr*/);
  public native @Cast("TfLiteStatus") int SetTensorParametersReadOnly(
        int tensor_index, @Cast("TfLiteType") int type, String name,
        @StdVector IntPointer dims, @ByVal TfLiteQuantizationParams quantization,
        String buffer, @Cast("size_t") long bytes);
  public native @Cast("TfLiteStatus") int SetTensorParametersReadOnly(
        int tensor_index, @Cast("TfLiteType") int type, @Cast("const char*") BytePointer name,
        @StdVector IntBuffer dims, @ByVal TfLiteQuantizationParams quantization,
        @Cast("const char*") BytePointer buffer, @Cast("size_t") long bytes,
        @Const Allocation allocation/*=nullptr*/);
  public native @Cast("TfLiteStatus") int SetTensorParametersReadOnly(
        int tensor_index, @Cast("TfLiteType") int type, @Cast("const char*") BytePointer name,
        @StdVector IntBuffer dims, @ByVal TfLiteQuantizationParams quantization,
        @Cast("const char*") BytePointer buffer, @Cast("size_t") long bytes);
  public native @Cast("TfLiteStatus") int SetTensorParametersReadOnly(
        int tensor_index, @Cast("TfLiteType") int type, String name,
        @StdVector int[] dims, @ByVal TfLiteQuantizationParams quantization,
        String buffer, @Cast("size_t") long bytes,
        @Const Allocation allocation/*=nullptr*/);
  public native @Cast("TfLiteStatus") int SetTensorParametersReadOnly(
        int tensor_index, @Cast("TfLiteType") int type, String name,
        @StdVector int[] dims, @ByVal TfLiteQuantizationParams quantization,
        String buffer, @Cast("size_t") long bytes);

  public native @Cast("TfLiteStatus") int SetTensorParametersReadOnly(
        int tensor_index, @Cast("TfLiteType") int type, @Cast("const char*") BytePointer name, @Cast("size_t") long rank,
        @Const IntPointer dims, @ByVal TfLiteQuantizationParams quantization,
        @Cast("const char*") BytePointer buffer, @Cast("size_t") long bytes, @Const Allocation allocation/*=nullptr*/);
  public native @Cast("TfLiteStatus") int SetTensorParametersReadOnly(
        int tensor_index, @Cast("TfLiteType") int type, @Cast("const char*") BytePointer name, @Cast("size_t") long rank,
        @Const IntPointer dims, @ByVal TfLiteQuantizationParams quantization,
        @Cast("const char*") BytePointer buffer, @Cast("size_t") long bytes);
  public native @Cast("TfLiteStatus") int SetTensorParametersReadOnly(
        int tensor_index, @Cast("TfLiteType") int type, String name, @Cast("size_t") long rank,
        @Const IntBuffer dims, @ByVal TfLiteQuantizationParams quantization,
        String buffer, @Cast("size_t") long bytes, @Const Allocation allocation/*=nullptr*/);
  public native @Cast("TfLiteStatus") int SetTensorParametersReadOnly(
        int tensor_index, @Cast("TfLiteType") int type, String name, @Cast("size_t") long rank,
        @Const IntBuffer dims, @ByVal TfLiteQuantizationParams quantization,
        String buffer, @Cast("size_t") long bytes);
  public native @Cast("TfLiteStatus") int SetTensorParametersReadOnly(
        int tensor_index, @Cast("TfLiteType") int type, @Cast("const char*") BytePointer name, @Cast("size_t") long rank,
        @Const int[] dims, @ByVal TfLiteQuantizationParams quantization,
        @Cast("const char*") BytePointer buffer, @Cast("size_t") long bytes, @Const Allocation allocation/*=nullptr*/);
  public native @Cast("TfLiteStatus") int SetTensorParametersReadOnly(
        int tensor_index, @Cast("TfLiteType") int type, @Cast("const char*") BytePointer name, @Cast("size_t") long rank,
        @Const int[] dims, @ByVal TfLiteQuantizationParams quantization,
        @Cast("const char*") BytePointer buffer, @Cast("size_t") long bytes);
  public native @Cast("TfLiteStatus") int SetTensorParametersReadOnly(
        int tensor_index, @Cast("TfLiteType") int type, String name, @Cast("size_t") long rank,
        @Const IntPointer dims, @ByVal TfLiteQuantizationParams quantization,
        String buffer, @Cast("size_t") long bytes, @Const Allocation allocation/*=nullptr*/);
  public native @Cast("TfLiteStatus") int SetTensorParametersReadOnly(
        int tensor_index, @Cast("TfLiteType") int type, String name, @Cast("size_t") long rank,
        @Const IntPointer dims, @ByVal TfLiteQuantizationParams quantization,
        String buffer, @Cast("size_t") long bytes);
  public native @Cast("TfLiteStatus") int SetTensorParametersReadOnly(
        int tensor_index, @Cast("TfLiteType") int type, @Cast("const char*") BytePointer name, @Cast("size_t") long rank,
        @Const IntBuffer dims, @ByVal TfLiteQuantizationParams quantization,
        @Cast("const char*") BytePointer buffer, @Cast("size_t") long bytes, @Const Allocation allocation/*=nullptr*/);
  public native @Cast("TfLiteStatus") int SetTensorParametersReadOnly(
        int tensor_index, @Cast("TfLiteType") int type, @Cast("const char*") BytePointer name, @Cast("size_t") long rank,
        @Const IntBuffer dims, @ByVal TfLiteQuantizationParams quantization,
        @Cast("const char*") BytePointer buffer, @Cast("size_t") long bytes);
  public native @Cast("TfLiteStatus") int SetTensorParametersReadOnly(
        int tensor_index, @Cast("TfLiteType") int type, String name, @Cast("size_t") long rank,
        @Const int[] dims, @ByVal TfLiteQuantizationParams quantization,
        String buffer, @Cast("size_t") long bytes, @Const Allocation allocation/*=nullptr*/);
  public native @Cast("TfLiteStatus") int SetTensorParametersReadOnly(
        int tensor_index, @Cast("TfLiteType") int type, String name, @Cast("size_t") long rank,
        @Const int[] dims, @ByVal TfLiteQuantizationParams quantization,
        String buffer, @Cast("size_t") long bytes);

  /** Set parameters for the tensor at {@code tensor_index} as a read-write
   *  tensor. Unlike the read-only variant above, no external buffer is given:
   *  the tensor's memory is allocated and managed by the Interpreter (during
   *  AllocateTensors()). */
  public native @Cast("TfLiteStatus") int SetTensorParametersReadWrite(int tensor_index, @Cast("TfLiteType") int type,
                                              @Cast("const char*") BytePointer name,
                                              @StdVector IntPointer dims,
                                              @ByVal TfLiteQuantization quantization,
                                              @Cast("bool") boolean is_variable/*=false*/);
  public native @Cast("TfLiteStatus") int SetTensorParametersReadWrite(int tensor_index, @Cast("TfLiteType") int type,
                                              @Cast("const char*") BytePointer name,
                                              @StdVector IntPointer dims,
                                              @ByVal TfLiteQuantization quantization);
  public native @Cast("TfLiteStatus") int SetTensorParametersReadWrite(int tensor_index, @Cast("TfLiteType") int type,
                                              String name,
                                              @StdVector IntBuffer dims,
                                              @ByVal TfLiteQuantization quantization,
                                              @Cast("bool") boolean is_variable/*=false*/);
  public native @Cast("TfLiteStatus") int SetTensorParametersReadWrite(int tensor_index, @Cast("TfLiteType") int type,
                                              String name,
                                              @StdVector IntBuffer dims,
                                              @ByVal TfLiteQuantization quantization);
  public native @Cast("TfLiteStatus") int SetTensorParametersReadWrite(int tensor_index, @Cast("TfLiteType") int type,
                                              @Cast("const char*") BytePointer name,
                                              @StdVector int[] dims,
                                              @ByVal TfLiteQuantization quantization,
                                              @Cast("bool") boolean is_variable/*=false*/);
  public native @Cast("TfLiteStatus") int SetTensorParametersReadWrite(int tensor_index, @Cast("TfLiteType") int type,
                                              @Cast("const char*") BytePointer name,
                                              @StdVector int[] dims,
                                              @ByVal TfLiteQuantization quantization);
  public native @Cast("TfLiteStatus") int SetTensorParametersReadWrite(int tensor_index, @Cast("TfLiteType") int type,
                                              String name,
                                              @StdVector IntPointer dims,
                                              @ByVal TfLiteQuantization quantization,
                                              @Cast("bool") boolean is_variable/*=false*/);
  public native @Cast("TfLiteStatus") int SetTensorParametersReadWrite(int tensor_index, @Cast("TfLiteType") int type,
                                              String name,
                                              @StdVector IntPointer dims,
                                              @ByVal TfLiteQuantization quantization);
  public native @Cast("TfLiteStatus") int SetTensorParametersReadWrite(int tensor_index, @Cast("TfLiteType") int type,
                                              @Cast("const char*") BytePointer name,
                                              @StdVector IntBuffer dims,
                                              @ByVal TfLiteQuantization quantization,
                                              @Cast("bool") boolean is_variable/*=false*/);
  public native @Cast("TfLiteStatus") int SetTensorParametersReadWrite(int tensor_index, @Cast("TfLiteType") int type,
                                              @Cast("const char*") BytePointer name,
                                              @StdVector IntBuffer dims,
                                              @ByVal TfLiteQuantization quantization);
  public native @Cast("TfLiteStatus") int SetTensorParametersReadWrite(int tensor_index, @Cast("TfLiteType") int type,
                                              String name,
                                              @StdVector int[] dims,
                                              @ByVal TfLiteQuantization quantization,
                                              @Cast("bool") boolean is_variable/*=false*/);
  public native @Cast("TfLiteStatus") int SetTensorParametersReadWrite(int tensor_index, @Cast("TfLiteType") int type,
                                              String name,
                                              @StdVector int[] dims,
                                              @ByVal TfLiteQuantization quantization);
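
  // Hedged sketch: declare tensor 0 as a float32 read-write tensor of shape
  // [1, 4] with no quantization. kTfLiteFloat32 and kTfLiteNoQuantization are
  // assumed to come from the statically imported global.tensorflowlite class:
  //
  //   TfLiteQuantization noQuant = new TfLiteQuantization();
  //   noQuant.type(kTfLiteNoQuantization);
  //   interpreter.SetTensorParametersReadWrite(
  //       0, kTfLiteFloat32, "input", new int[] {1, 4}, noQuant);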

  /** Legacy. Deprecated in favor of above. */
  public native @Cast("TfLiteStatus") int SetTensorParametersReadWrite(
        int tensor_index, @Cast("TfLiteType") int type, @Cast("const char*") BytePointer name,
        @StdVector IntPointer dims, @ByVal TfLiteQuantizationParams quantization,
        @Cast("bool") boolean is_variable/*=false*/,
        @StdVector IntPointer dims_signature/*=nullptr*/);
  public native @Cast("TfLiteStatus") int SetTensorParametersReadWrite(
        int tensor_index, @Cast("TfLiteType") int type, @Cast("const char*") BytePointer name,
        @StdVector IntPointer dims, @ByVal TfLiteQuantizationParams quantization);
  public native @Cast("TfLiteStatus") int SetTensorParametersReadWrite(
        int tensor_index, @Cast("TfLiteType") int type, String name,
        @StdVector IntBuffer dims, @ByVal TfLiteQuantizationParams quantization,
        @Cast("bool") boolean is_variable/*=false*/,
        @StdVector IntBuffer dims_signature/*=nullptr*/);
  public native @Cast("TfLiteStatus") int SetTensorParametersReadWrite(
        int tensor_index, @Cast("TfLiteType") int type, String name,
        @StdVector IntBuffer dims, @ByVal TfLiteQuantizationParams quantization);
  public native @Cast("TfLiteStatus") int SetTensorParametersReadWrite(
        int tensor_index, @Cast("TfLiteType") int type, @Cast("const char*") BytePointer name,
        @StdVector int[] dims, @ByVal TfLiteQuantizationParams quantization,
        @Cast("bool") boolean is_variable/*=false*/,
        @StdVector int[] dims_signature/*=nullptr*/);
  public native @Cast("TfLiteStatus") int SetTensorParametersReadWrite(
        int tensor_index, @Cast("TfLiteType") int type, @Cast("const char*") BytePointer name,
        @StdVector int[] dims, @ByVal TfLiteQuantizationParams quantization);
  public native @Cast("TfLiteStatus") int SetTensorParametersReadWrite(
        int tensor_index, @Cast("TfLiteType") int type, String name,
        @StdVector IntPointer dims, @ByVal TfLiteQuantizationParams quantization,
        @Cast("bool") boolean is_variable/*=false*/,
        @StdVector IntPointer dims_signature/*=nullptr*/);
  public native @Cast("TfLiteStatus") int SetTensorParametersReadWrite(
        int tensor_index, @Cast("TfLiteType") int type, String name,
        @StdVector IntPointer dims, @ByVal TfLiteQuantizationParams quantization);
  public native @Cast("TfLiteStatus") int SetTensorParametersReadWrite(
        int tensor_index, @Cast("TfLiteType") int type, @Cast("const char*") BytePointer name,
        @StdVector IntBuffer dims, @ByVal TfLiteQuantizationParams quantization,
        @Cast("bool") boolean is_variable/*=false*/,
        @StdVector IntBuffer dims_signature/*=nullptr*/);
  public native @Cast("TfLiteStatus") int SetTensorParametersReadWrite(
        int tensor_index, @Cast("TfLiteType") int type, @Cast("const char*") BytePointer name,
        @StdVector IntBuffer dims, @ByVal TfLiteQuantizationParams quantization);
  public native @Cast("TfLiteStatus") int SetTensorParametersReadWrite(
        int tensor_index, @Cast("TfLiteType") int type, String name,
        @StdVector int[] dims, @ByVal TfLiteQuantizationParams quantization,
        @Cast("bool") boolean is_variable/*=false*/,
        @StdVector int[] dims_signature/*=nullptr*/);
  public native @Cast("TfLiteStatus") int SetTensorParametersReadWrite(
        int tensor_index, @Cast("TfLiteType") int type, String name,
        @StdVector int[] dims, @ByVal TfLiteQuantizationParams quantization);
  public native @Cast("TfLiteStatus") int SetTensorParametersReadWrite(
        int tensor_index, @Cast("TfLiteType") int type, @Cast("const char*") BytePointer name, @Cast("size_t") long rank,
        @Const IntPointer dims, @ByVal TfLiteQuantizationParams quantization,
        @Cast("bool") boolean is_variable/*=false*/, @Cast("size_t") long rank_dims_signature/*=0*/,
        @Const IntPointer dims_signature/*=nullptr*/);
  public native @Cast("TfLiteStatus") int SetTensorParametersReadWrite(
        int tensor_index, @Cast("TfLiteType") int type, @Cast("const char*") BytePointer name, @Cast("size_t") long rank,
        @Const IntPointer dims, @ByVal TfLiteQuantizationParams quantization);
  public native @Cast("TfLiteStatus") int SetTensorParametersReadWrite(
        int tensor_index, @Cast("TfLiteType") int type, String name, @Cast("size_t") long rank,
        @Const IntBuffer dims, @ByVal TfLiteQuantizationParams quantization,
        @Cast("bool") boolean is_variable/*=false*/, @Cast("size_t") long rank_dims_signature/*=0*/,
        @Const IntBuffer dims_signature/*=nullptr*/);
  public native @Cast("TfLiteStatus") int SetTensorParametersReadWrite(
        int tensor_index, @Cast("TfLiteType") int type, String name, @Cast("size_t") long rank,
        @Const IntBuffer dims, @ByVal TfLiteQuantizationParams quantization);
  public native @Cast("TfLiteStatus") int SetTensorParametersReadWrite(
        int tensor_index, @Cast("TfLiteType") int type, @Cast("const char*") BytePointer name, @Cast("size_t") long rank,
        @Const int[] dims, @ByVal TfLiteQuantizationParams quantization,
        @Cast("bool") boolean is_variable/*=false*/, @Cast("size_t") long rank_dims_signature/*=0*/,
        @Const int[] dims_signature/*=nullptr*/);
  public native @Cast("TfLiteStatus") int SetTensorParametersReadWrite(
        int tensor_index, @Cast("TfLiteType") int type, @Cast("const char*") BytePointer name, @Cast("size_t") long rank,
        @Const int[] dims, @ByVal TfLiteQuantizationParams quantization);
  public native @Cast("TfLiteStatus") int SetTensorParametersReadWrite(
        int tensor_index, @Cast("TfLiteType") int type, String name, @Cast("size_t") long rank,
        @Const IntPointer dims, @ByVal TfLiteQuantizationParams quantization,
        @Cast("bool") boolean is_variable/*=false*/, @Cast("size_t") long rank_dims_signature/*=0*/,
        @Const IntPointer dims_signature/*=nullptr*/);
  public native @Cast("TfLiteStatus") int SetTensorParametersReadWrite(
        int tensor_index, @Cast("TfLiteType") int type, String name, @Cast("size_t") long rank,
        @Const IntPointer dims, @ByVal TfLiteQuantizationParams quantization);
  public native @Cast("TfLiteStatus") int SetTensorParametersReadWrite(
        int tensor_index, @Cast("TfLiteType") int type, @Cast("const char*") BytePointer name, @Cast("size_t") long rank,
        @Const IntBuffer dims, @ByVal TfLiteQuantizationParams quantization,
        @Cast("bool") boolean is_variable/*=false*/, @Cast("size_t") long rank_dims_signature/*=0*/,
        @Const IntBuffer dims_signature/*=nullptr*/);
  public native @Cast("TfLiteStatus") int SetTensorParametersReadWrite(
        int tensor_index, @Cast("TfLiteType") int type, @Cast("const char*") BytePointer name, @Cast("size_t") long rank,
        @Const IntBuffer dims, @ByVal TfLiteQuantizationParams quantization);
  public native @Cast("TfLiteStatus") int SetTensorParametersReadWrite(
        int tensor_index, @Cast("TfLiteType") int type, String name, @Cast("size_t") long rank,
        @Const int[] dims, @ByVal TfLiteQuantizationParams quantization,
        @Cast("bool") boolean is_variable/*=false*/, @Cast("size_t") long rank_dims_signature/*=0*/,
        @Const int[] dims_signature/*=nullptr*/);
  public native @Cast("TfLiteStatus") int SetTensorParametersReadWrite(
        int tensor_index, @Cast("TfLiteType") int type, String name, @Cast("size_t") long rank,
        @Const int[] dims, @ByVal TfLiteQuantizationParams quantization);

  /** Enables the application to cancel an in-flight invocation with
   *  {@code Cancel}. This can only be set when building the interpreter, and
   *  should not be called directly.
   *  NOTE: This function does not affect cancellation triggered by the callback
   *  passed in {@code SetCancellationFunction}. */
  public native @Cast("TfLiteStatus") int EnableCancellation();
// #endif  // DOXYGEN_SKIP
  // Functions to access tensor data

  /** Read only access to list of inputs. */
  public native @StdVector IntPointer inputs();

  /** Return the name of a given input. The given index must be between 0 and
   *  inputs().size(). */
  public native @Cast("const char*") BytePointer GetInputName(int index);

  /** Read only access to list of outputs. */
  public native @StdVector IntPointer outputs();

  /** Read only access to list of variable tensors. */
  public native @StdVector IntPointer variables();

  /** Return the name of a given output. The given index must be between 0 and
   *  outputs().size(). */
  public native @Cast("const char*") BytePointer GetOutputName(int index);

  /** Return the number of tensors in the model. */
  public native @Cast("size_t") long tensors_size();

  /** Return the number of ops in the model. */
  public native @Cast("size_t") long nodes_size();

  /** \warning Experimental interface, subject to change. */
  public native @StdVector IntPointer execution_plan();

  /** Get a mutable tensor data structure. */
  // TODO(aselle): Create a safe ArrayHandle interface to avoid exposing this
  // read/write access to structure
  public native TfLiteTensor tensor(int tensor_index);

  /** Get an immutable tensor data structure. */

  /** Returns a pointer to an operation and registration data structure if in
   *  bounds from the primary subgraph (subgraph_[0]). */
  public native @Const RegistrationNodePair node_and_registration(
        int node_index);

  /** Returns a pointer to an operation and registration data structure if in
   *  bounds. */
  public native @Const RegistrationNodePair node_and_registration(
        int subgraph_index, int node_index);

  /** Perform a checked cast to the appropriate tensor type (mutable pointer
   *  version). */
  public native @Name("typed_tensor") BytePointer typed_tensor_byte(int tensor_index);
  public native @Name("typed_tensor") ShortPointer typed_tensor_short(int tensor_index);
  public native @Name("typed_tensor") IntPointer typed_tensor_int(int tensor_index);
  public native @Cast("int64_t*") @Name("typed_tensor") LongPointer typed_tensor_long(int tensor_index);
  public native @Name("typed_tensor") FloatPointer typed_tensor_float(int tensor_index);
  public native @Name("typed_tensor") DoublePointer typed_tensor_double(int tensor_index);
  public native @Cast("bool*") @Name("typed_tensor") BoolPointer typed_tensor_bool(int tensor_index);
  public native @Name("typed_tensor") TfLiteFloat16 typed_tensor_float16(int tensor_index);

  /** Perform a checked cast to the appropriate tensor type (immutable pointer
   *  version). */

  /** \brief Returns list of all keys of different method signatures defined
   *  in the model.
   *  Note, pointers returned have lifetime same as the Interpreter object. */
  public native @ByVal StringVector signature_keys();

  /** \brief Returns a pointer to the SignatureRunner instance to run the part
   *  of the graph identified by a SignatureDef.  If the model does not have any
   *  signature defs, pass nullptr as signature_key and a SignatureRunner will
   *  be created using the primary subgraph (0).  A nullptr is returned if the
   *  given signature_key is not valid.  Note, the returned SignatureRunner
   *  instance is owned by and has the same lifetime as the Interpreter object;
   *  additionally, class SignatureRunner is *not* thread-safe.
   *  This function will additionally apply default delegates unless
   *  {@code apply_default_delegate} is set to false.
   *  If you need to specify delegates, you have to do that before calling this
   *  function or provide {@code apply_default_delegate} as false and applying
   *  delegates later. */
  public native SignatureRunner GetSignatureRunner(@Cast("const char*") BytePointer signature_key,
                                        @Cast("bool") boolean apply_default_delegate/*=true*/);
  public native SignatureRunner GetSignatureRunner(@Cast("const char*") BytePointer signature_key);
  public native SignatureRunner GetSignatureRunner(String signature_key,
                                        @Cast("bool") boolean apply_default_delegate/*=true*/);
  public native SignatureRunner GetSignatureRunner(String signature_key);
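
  // Hedged usage sketch, assuming the SignatureRunner mapping mirrors the C++
  // API (AllocateTensors() and Invoke()); "serving_default" is just the
  // conventional SavedModel signature key, not something every model defines:
  //
  //   SignatureRunner runner = interpreter.GetSignatureRunner("serving_default");
  //   if (runner != null) {
  //       runner.AllocateTensors();
  //       runner.Invoke();
  //   }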

  /** \warning Experimental interface, subject to change. \n \brief Returns a
   *  pointer to the AsyncSignatureRunner instance to run the part of the graph
   *  identified by a SignatureDef.  If the model does not have any signature
   *  defs, pass nullptr as signature_key and an AsyncSignatureRunner will be
   *  created using the primary subgraph (0).  A nullptr is returned if the
   *  given signature_key is not valid.  Note, the returned AsyncSignatureRunner
   *  instance is owned by and has the same lifetime as the Interpreter object;
   *  additionally, class AsyncSignatureRunner is *not* thread-safe.
   *  The async delegate should be applied before calling this function. */

  /** \warning Experimental interface, subject to change. \n
   *  \brief Return the subgraph index that corresponds to a SignatureDef,
   *  defined by 'signature_key'.
   *  If an invalid name is passed, -1 will be returned. */
  public native int GetSubgraphIndexFromSignature(@Cast("const char*") BytePointer signature_key);
  public native int GetSubgraphIndexFromSignature(String signature_key);

  /** \brief Returns the mapping of inputs to tensor index in the signature
   *  specified through 'signature_key'.
   *  If an invalid name is passed, an empty list will be returned. */
  public native @Const @ByRef StringIntMap signature_inputs(
        @Cast("const char*") BytePointer signature_key);
  public native @Const @ByRef StringIntMap signature_inputs(
        String signature_key);

  /** \brief Returns the mapping of outputs to tensor index in the signature
   *  specified through 'signature_key'.
   *  If an invalid name is passed, an empty list will be returned. */
  public native @Const @ByRef StringIntMap signature_outputs(
        @Cast("const char*") BytePointer signature_key);
  public native @Const @ByRef StringIntMap signature_outputs(
        String signature_key);

  /** \brief Returns the input tensor identified by 'signature_input_name' in
   *  the signature identified by 'signature_key'.
   *  Returns nullptr if not found. */
  public native TfLiteTensor input_tensor_by_signature(@Cast("const char*") BytePointer signature_input_name,
                                            @Cast("const char*") BytePointer signature_key);
  public native TfLiteTensor input_tensor_by_signature(String signature_input_name,
                                            String signature_key);

  /** \brief Returns the output tensor identified by 'signature_output_name' in
   *  the signature identified by 'signature_key'.
   *  Returns nullptr if not found. */
  public native @Const TfLiteTensor output_tensor_by_signature(
        @Cast("const char*") BytePointer signature_output_name, @Cast("const char*") BytePointer signature_key);
  public native @Const TfLiteTensor output_tensor_by_signature(
        String signature_output_name, String signature_key);

  /** Return a mutable pointer to the given input tensor. The given index must
   *  be between 0 and inputs().size(). */
  public native TfLiteTensor input_tensor(@Cast("size_t") long index);

  /** Return an immutable pointer to the given input tensor. The given index
   *  must be between 0 and inputs().size(). */

  /** Return a mutable pointer into the data of a given input tensor. The given
   *  index must be between 0 and inputs().size(). */
  public native @Name("typed_input_tensor") BytePointer typed_input_tensor_byte(int index);
  public native @Name("typed_input_tensor") ShortPointer typed_input_tensor_short(int index);
  public native @Name("typed_input_tensor") IntPointer typed_input_tensor_int(int index);
  public native @Cast("int64_t*") @Name("typed_input_tensor") LongPointer typed_input_tensor_long(int index);
  public native @Name("typed_input_tensor") FloatPointer typed_input_tensor_float(int index);
  public native @Name("typed_input_tensor") DoublePointer typed_input_tensor_double(int index);
  public native @Cast("bool*") @Name("typed_input_tensor") BoolPointer typed_input_tensor_bool(int index);
  public native @Name("typed_input_tensor") TfLiteFloat16 typed_input_tensor_float16(int index);

  /** Return an immutable pointer into the data of a given input tensor. The
   *  given index must be between 0 and inputs().size(). */

  /** Return a mutable pointer to the given output tensor. The given index must
   *  be between 0 and outputs().size(). */
  public native TfLiteTensor output_tensor(@Cast("size_t") long index);

  /** Return an immutable pointer to the given output tensor. The given index
   *  must be between 0 and outputs().size(). */

  /** Return a mutable pointer into the data of a given output tensor. The given
   *  index must be between 0 and outputs().size(). */
  public native @Name("typed_output_tensor") BytePointer typed_output_tensor_byte(int index);
  public native @Name("typed_output_tensor") ShortPointer typed_output_tensor_short(int index);
  public native @Name("typed_output_tensor") IntPointer typed_output_tensor_int(int index);
  public native @Cast("int64_t*") @Name("typed_output_tensor") LongPointer typed_output_tensor_long(int index);
  public native @Name("typed_output_tensor") FloatPointer typed_output_tensor_float(int index);
  public native @Name("typed_output_tensor") DoublePointer typed_output_tensor_double(int index);
  public native @Cast("bool*") @Name("typed_output_tensor") BoolPointer typed_output_tensor_bool(int index);

  /** Return an immutable pointer into the data of a given output tensor. The
   *  given index must be between 0 and outputs().size(). */

  /** Change the dimensionality of a given tensor. Note, this is only acceptable
   *  for tensor indices that are inputs or variables.
   *  Returns status of success or failure. Note that this doesn't actually
   *  resize any existing buffers. A call to AllocateTensors() is required to
   *  change the tensor input buffer. */
  public native @Cast("TfLiteStatus") int ResizeInputTensor(int tensor_index,
                                   @StdVector IntPointer dims);
  public native @Cast("TfLiteStatus") int ResizeInputTensor(int tensor_index,
                                   @StdVector IntBuffer dims);
  public native @Cast("TfLiteStatus") int ResizeInputTensor(int tensor_index,
                                   @StdVector int[] dims);

  /** Change the dimensionality of a given tensor. This is only acceptable for
   *  tensor indices that are inputs or variables. Only unknown dimensions can
   *  be resized with this function. Unknown dimensions are indicated as {@code -1} in
   *  the {@code dims_signature} attribute of a {@code TfLiteTensor}. Returns
   *  status of success or failure. Note that this doesn't actually resize any existing
   *  buffers. A call to AllocateTensors() is required to change the tensor
   *  input buffer. */
  public native @Cast("TfLiteStatus") int ResizeInputTensorStrict(int tensor_index,
                                         @StdVector IntPointer dims);
  public native @Cast("TfLiteStatus") int ResizeInputTensorStrict(int tensor_index,
                                         @StdVector IntBuffer dims);
  public native @Cast("TfLiteStatus") int ResizeInputTensorStrict(int tensor_index,
                                         @StdVector int[] dims);
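
  // For example (sketch): resize the first model input, then reallocate before
  // the next Invoke(); the shape here is an illustrative assumption:
  //
  //   int in0 = interpreter.inputs().get(0);
  //   interpreter.ResizeInputTensor(in0, new int[] {1, 224, 224, 3});
  //   interpreter.AllocateTensors();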

  /** \warning Experimental interface, subject to change. \n
   *  \brief This releases memory held by non-persistent tensors. It does NOT
   *  re-perform memory planning. AllocateTensors needs to be called before next
   *  invocation. */
  public native @Cast("TfLiteStatus") int ReleaseNonPersistentMemory();

  /** Update allocations for all tensors. This will redim dependent tensors
   *  using the input tensor dimensionality as given. This is relatively
   *  expensive. This *must be* called after the interpreter has been created
   *  and before running inference (and accessing tensor buffers), and *must be*
   *  called again if (and only if) an input tensor is resized. Returns status
   *  of success or failure.  Will fail if any of the ops in the model (other
   *  than those which were rewritten by delegates, if any) are not supported by
   *  the Interpreter's OpResolver. */
  
  ///
  public native @Cast("TfLiteStatus") int AllocateTensors();

  /** Invoke the interpreter (run the whole graph in dependency order).
   * 
   *  NOTE: It is possible that the interpreter is not in a ready state
   *  to evaluate (e.g. if ResizeInputTensor() has been performed without a
   *  subsequent call to AllocateTensors()).
   *  Returns status of success or failure. */
  
  ///
  ///
  ///
  ///
  public native @Cast("TfLiteStatus") int Invoke();

  /** Set the number of threads available to the interpreter.
   * 
   *  NOTE: {@code num_threads} should be >= -1. Setting {@code num_threads} to
   *  0 has the effect of disabling multithreading, which is equivalent to
   *  setting {@code num_threads} to 1. If set to the value -1, the number of
   *  threads used will be implementation-defined and platform-dependent.
   * 
   *  As the TfLite interpreter may internally apply a delegate by default
   *  (e.g. XNNPACK), the number of threads available to that default delegate
   *  *should be* set via the InterpreterBuilder APIs, as follows:
   * 
   *      std::unique_ptr<tflite::Interpreter> interpreter;
   *      tflite::InterpreterBuilder builder(tflite_model, op_resolver);
   *      builder.SetNumThreads(...);
   *      ASSERT_EQ(builder(&interpreter), kTfLiteOk);
   * 
   *  WARNING: This API is deprecated: prefer using
   *  {@code InterpreterBuilder::SetNumThreads}, as documented above. */
  
  ///
  public native @Cast("TfLiteStatus") int SetNumThreads(int num_threads);

  /** Allow float16 precision for FP32 calculation when possible.
   *  Default: not allow.
   * 
   *  WARNING: This API is deprecated: prefer controlling this via delegate
   *  options, e.g. {@code tflite::StatefulNnApiDelegate::Options::allow_fp16} or
   *  {@code TfLiteGpuDelegateOptionsV2::is_precision_loss_allowed}.
   *  This method will be removed in a future release. */
  public native void SetAllowFp16PrecisionForFp32(@Cast("bool") boolean allow);

  /** \warning Experimental interface, subject to change. \n
   *  \brief Get the half precision flag. */
  public native @Cast("bool") boolean GetAllowFp16PrecisionForFp32();

  /** \warning This is an experimental API and subject to change. \n
   *  \brief Sets the cancellation function pointer in order to cancel a request
   *  in the middle of a call to Invoke(). The interpreter queries this function
   *  during inference, between op invocations; when it returns true, the
   *  interpreter will abort execution and return {@code kTfLiteError}. The {@code data}
   *  parameter contains any data used by the cancellation function, and if
   *  non-null, remains owned by the caller. */
  public static class Check_cancelled_func_Pointer extends FunctionPointer {
      static { Loader.load(); }
      /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
      public    Check_cancelled_func_Pointer(Pointer p) { super(p); }
      protected Check_cancelled_func_Pointer() { allocate(); }
      private native void allocate();
      public native @Cast("bool") boolean call(Pointer arg0);
  }
  public native void SetCancellationFunction(Pointer data, Check_cancelled_func_Pointer check_cancelled_func);
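
  // Hedged sketch: a cancellation callback can be installed by subclassing the
  // FunctionPointer above and overriding call(); returning true makes the
  // in-flight Invoke() abort with kTfLiteError. The AtomicBoolean flag is an
  // illustrative assumption:
  //
  //   final java.util.concurrent.atomic.AtomicBoolean cancelRequested =
  //       new java.util.concurrent.atomic.AtomicBoolean(false);
  //   interpreter.SetCancellationFunction(null,
  //       new Check_cancelled_func_Pointer() {
  //           @Override public boolean call(Pointer data) {
  //               return cancelRequested.get();
  //           }
  //       });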

  /** \warning This is an experimental API and subject to change. \n
   *  \brief Attempts to cancel an in-flight invocation, if any.
   *  This will not affect calls to {@code Invoke} that happen after the
   *  cancellation. Non-blocking and thread-safe.
   *  Returns kTfLiteError if cancellation is not enabled, otherwise returns
   *  kTfLiteOk. */
  public native @Cast("TfLiteStatus") int Cancel();

  /** \brief Allow a delegate to look at the graph and modify the graph to
   *  handle parts of the graph themselves. After this is called, the graph may
   *  contain new nodes that replace one or more of the original nodes.
   *  'delegate' must outlive the interpreter.
   *  Returns one of the following status codes:
   *  1. kTfLiteOk: Success.
   *  2. kTfLiteDelegateError: Delegation failed due to an error in the
   *  delegate, or the delegate parameter was null. The Interpreter has been
   *  restored to its pre-delegation state.
   *  NOTE: This undoes all delegates previously applied to the Interpreter.
   *  3. kTfLiteApplicationError : Delegation failed to be applied due to the
   *  incompatibility with the TfLite runtime, e.g., the model graph is already
   *  immutable when applying the delegate. However, the interpreter could still
   *  be invoked.
   *  4. kTfLiteUnresolvedOps: Delegation failed because the model has an
   *  operator that cannot be resolved. This can happen when the op is not
   *  registered or built with the TF Lite framework.
   *  5. kTfLiteError: Unexpected/runtime failure. \n
   *  \warning This is an experimental API and subject to change. \n */
  public native @Cast("TfLiteStatus") int ModifyGraphWithDelegate(TfLiteDelegate delegate);
  public native @Cast("TfLiteStatus") int ModifyGraphWithDelegate(TfLiteOpaqueDelegateStruct delegate);

  // Owning handle to a TfLiteDelegate instance.

  /** \warning This is an experimental API and subject to change. \n
   *  \brief Same as ModifyGraphWithDelegate except this interpreter takes
   *  ownership of the provided delegate. */

  /** This overload is *never* OK. TfLiteDelegate is a C structure, so it has no
   *  virtual destructor. The default deleter of the unique_ptr does not know
   *  how to delete C++ objects deriving from TfLiteDelegate. */
  

  /** \warning This is an experimental API and subject to change. \n
   *  \brief Ensure the data in {@code tensor.data} is readable. If a
   *  delegate has been used, and {@code SetAllowBufferHandleOutput(true)} has been
   *  called, tensor outputs may be stored as delegate buffer handles whose data
   *  is not directly readable until this method has been called.
   *  In such cases, this method will copy the data from the delegate buffer
   *  handle to CPU memory. */
  public native @Cast("TfLiteStatus") int EnsureTensorDataIsReadable(int tensor_index);

  /** \warning This is an experimental API and subject to change. \n
   *  \brief Set the delegate buffer handle to a tensor. It can be called in the
   *  following cases:
   *  1. Set the buffer handle to a tensor that's not being written by a
   *     delegate. For example, feeding an OpenGL texture as the input of the
   *     inference graph.
   *  2. Set the buffer handle to a tensor that uses the same delegate.
   *     For example, set an OpenGL texture as the output of inference, while
   *     the node which produces output is an OpenGL delegate node. */
  public native @Cast("TfLiteStatus") int SetBufferHandle(int tensor_index,
                                 @Cast("TfLiteBufferHandle") int buffer_handle,
                                 TfLiteDelegate delegate);

  /** \warning This is an experimental API and subject to change. \n
   *  \brief Set the delegate buffer handle to the given tensor. */
  // It can be called in the following cases:
  // 1. Set the buffer handle to a tensor that is used by other computing
  // hardware such as EdgeTpu. For example, EdgeTpu delegate imports a tensor's
  // memory into EdgeTpu's virtual address space and returns a buffer handle. Then
  // EdgeTpu delegate calls this API to associate the tensor with the buffer
  // handle. Example bug b/277217867.
  public native @Cast("TfLiteStatus") int SetBufferHandle(TfLiteTensor tensor,
                                 @Cast("TfLiteBufferHandle") int buffer_handle,
                                 TfLiteDelegate delegate);

  /** \warning This is an experimental API and subject to change. \n
   *  \brief Get the delegate buffer handle, and the delegate which can process
   *  the buffer handle. */
  public native @Cast("TfLiteStatus") int GetBufferHandle(int tensor_index,
                                 @Cast("TfLiteBufferHandle*") IntPointer buffer_handle,
                                 @Cast("TfLiteDelegate**") PointerPointer delegate);
  public native @Cast("TfLiteStatus") int GetBufferHandle(int tensor_index,
                                 @Cast("TfLiteBufferHandle*") IntPointer buffer_handle,
                                 @ByPtrPtr TfLiteDelegate delegate);
  public native @Cast("TfLiteStatus") int GetBufferHandle(int tensor_index,
                                 @Cast("TfLiteBufferHandle*") IntBuffer buffer_handle,
                                 @ByPtrPtr TfLiteDelegate delegate);
  public native @Cast("TfLiteStatus") int GetBufferHandle(int tensor_index,
                                 @Cast("TfLiteBufferHandle*") int[] buffer_handle,
                                 @ByPtrPtr TfLiteDelegate delegate);

  /** \warning This is an experimental API and subject to change. \n
   *  \brief Sets the profiler used to trace execution. The caller retains
   *  ownership of the profiler and must ensure its validity.
   *  Previously registered profilers will be unregistered.
   *  If {@code profiler} is nullptr, all previously installed profilers will be
   *  removed. */
  public native void SetProfiler(Profiler profiler);

  /** \warning This is an experimental API and subject to change. \n
   *  \brief Same as SetProfiler except this interpreter takes ownership
   *  of the provided profiler.
   *  Previously registered profilers will be unregistered.
   *  If {@code profiler} is nullptr, all previously installed profilers will be
   *  removed. */

  /** \warning This is an experimental API and subject to change. \n
   *  \brief Adds a profiler used to trace execution. The caller retains
   *  ownership of the profiler and must ensure its validity.
   *  nullptr {@code profiler} will be ignored. */
  public native void AddProfiler(Profiler profiler);

  /** \warning This is an experimental API and subject to change. \n
   *  \brief Adds a profiler used to trace execution. Transfers
   *  ownership of the profiler to the interpreter.
   *  nullptr {@code profiler} will be ignored. */

  /** \warning This is an experimental API and subject to change. \n
   *  \brief Gets the profiler used for op tracing. */
  public native Profiler GetProfiler();

  // The default capacity of `tensors_` vector.
  @MemberGetter public static native int kTensorsReservedCapacity();
  public static final int kTensorsReservedCapacity = kTensorsReservedCapacity();
  /** The capacity headroom of {@code tensors_} vector before calling ops'
   *  {@code prepare} and {@code invoke} function. In these functions, it's guaranteed
   *  allocating up to {@code kTensorsCapacityHeadroom} more tensors won't invalidate
   *  pointers to existing tensors. */
  
  ///
  @MemberGetter public static native int kTensorsCapacityHeadroom();
  public static final int kTensorsCapacityHeadroom = kTensorsCapacityHeadroom();

  /** \warning This is an experimental API and subject to change. \n
   *  \brief Set if buffer handle output is allowed.
   * 
   *  When using hardware delegation, Interpreter will make the data of output
   *  tensors available in {@code tensor->data} by default. If the application can
   *  consume the buffer handle directly (e.g. reading output from an OpenGL
   *  texture), it can set this flag to true, so the Interpreter won't copy the
   *  data from buffer handle to CPU memory. */
  public native void SetAllowBufferHandleOutput(@Cast("bool") boolean allow_buffer_handle_output);

  /** \warning This is an experimental API and subject to change. \n
   *  \brief Reset all variable tensors to the default value.
   *  If a variable tensor doesn't have a buffer, reset it to zero.
   *  TODO(b/115961645): Implement - If a variable tensor has a buffer, reset it
   *  to the value of the buffer. */
  public native @Cast("TfLiteStatus") int ResetVariableTensors();

  /** Retrieve an operator's description of its work, for profiling purposes. */
  public native @Cast("const char*") BytePointer OpProfilingString(@Const @ByRef TfLiteRegistration op_reg,
                                  @Const TfLiteNode node);

  // Set the value of an external context. TFLite interpreter doesn't take the
  // memory ownership of this external context 'ctx', and the context should
  // outlive the TFLite interpreter.
  
  ///
  ///
  public native void SetExternalContext(@Cast("TfLiteExternalContextType") int type,
                            TfLiteExternalContext ctx);

  /** \brief Assigns (or reassigns) a custom memory allocation for the given
   *  tensor. {@code flags} is a bitmask, see TfLiteCustomAllocationFlags.
   *  The runtime does NOT take ownership of the underlying memory.
   * 
   *  NOTE: User needs to call AllocateTensors() after this.
   *  Invalid/insufficient buffers will cause an error during AllocateTensors or
   *  Invoke (in case of dynamic shapes in the graph).
   * 
   *  Parameters should satisfy the following conditions:
   *  1. tensor->allocation_type == kTfLiteArenaRw or kTfLiteArenaRwPersistent
   *     In general, this is true for I/O tensors & variable tensors.
   *  2. allocation->data has the appropriate permissions for runtime access
   *     (Read-only for inputs, Read-Write for others), and outlives
   *     Interpreter.
   *  3. allocation->bytes >= tensor->bytes.
   *     This condition is checked again if any tensors are resized.
   *  4. allocation->data should be aligned to kDefaultTensorAlignment
   *     defined in lite/util.h. (Currently 64 bytes)
   *     This check is skipped if kTfLiteCustomAllocationFlagsSkipAlignCheck is
   *     set through {@code flags}.
   *  \warning This is an experimental API and subject to change. \n */
  public native @Cast("TfLiteStatus") int SetCustomAllocationForTensor(
        int tensor_index, @Const @ByRef TfLiteCustomAllocation allocation,
        @Cast("int64_t") long flags/*=kTfLiteCustomAllocationFlagsNone*/);
  public native @Cast("TfLiteStatus") int SetCustomAllocationForTensor(
        int tensor_index, @Const @ByRef TfLiteCustomAllocation allocation);
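
  // Hedged sketch, assuming TfLiteCustomAllocation maps its C 'data' and
  // 'bytes' fields as chainable JavaCPP accessors, and that 'buffer' is a
  // hypothetical 64-byte-aligned Pointer at least as large as the tensor:
  //
  //   TfLiteCustomAllocation alloc = new TfLiteCustomAllocation();
  //   alloc.data(buffer).bytes(buffer.capacity());
  //   interpreter.SetCustomAllocationForTensor(tensorIndex, alloc);
  //   interpreter.AllocateTensors();  // required after (re)assigning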

  /** \warning This is an experimental API and subject to change. \n
   *  \brief Apply InterpreterOptions which tunes behavior of the interpreter. */
  public native @Cast("TfLiteStatus") int ApplyOptions(InterpreterOptions options);

// #ifndef DOXYGEN_SKIP
  /** \warning This is an experimental API and subject to change. \n
   *  \brief Return the number of subgraphs in the model. */
  public native @Cast("size_t") long subgraphs_size();

  /** \warning This is an experimental API and subject to change. \n
   *  \brief Get a pointer to a subgraph if in bounds. */

  /** \warning This is an experimental API and subject to change. */
  public native Subgraph subgraph(int subgraph_index);

  /** \warning Experimental interface, subject to change. */
  public native @ByRef Subgraph primary_subgraph();

  /** \warning Experimental interface, subject to change. */
// #endif  // DOXYGEN_SKIP

  /** \warning Experimental interface, subject to change. \n
   *  \brief Get the error reporter associated with this interpreter. */
  public native ErrorReporter error_reporter();
}



