
// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE

package org.bytedeco.tensorflowlite;

import java.nio.*;
import org.bytedeco.javacpp.*;
import org.bytedeco.javacpp.annotation.*;

import static org.bytedeco.tensorflowlite.global.tensorflowlite.*;


/** {@code TfLiteContext} allows an op to access the tensors.
 * 
 *  {@code TfLiteContext} is a struct that is created by the TF Lite runtime
 *  and passed to the "methods" (C function pointers) in the
 *  {@code TfLiteRegistration} struct that are used to define custom ops and custom
 *  delegate kernels. It contains information and methods (C function pointers)
 *  that can be called by the code implementing a custom op or a custom delegate
 *  kernel. These methods provide access to the context in which that custom op
 *  or custom delegate kernel occurs, such as access to the input and output
 *  tensors for that op, as well as methods for allocating memory buffers
 *  and intermediate tensors, etc.
 * 
 *  See also {@code TfLiteOpaqueContext}, which is a more ABI-stable equivalent. */
@Properties(inherit = org.bytedeco.tensorflowlite.presets.tensorflowlite.class)
public class TfLiteContext extends Pointer {
    static { Loader.load(); }
    /** Default native constructor. */
    public TfLiteContext() { super((Pointer)null); allocate(); }
    /** Native array allocator. Access with {@link Pointer#position(long)}. */
    public TfLiteContext(long size) { super((Pointer)null); allocateArray(size); }
    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
    public TfLiteContext(Pointer p) { super(p); }
    private native void allocate();
    private native void allocateArray(long size);
    @Override public TfLiteContext position(long position) {
        return (TfLiteContext)super.position(position);
    }
    @Override public TfLiteContext getPointer(long i) {
        return new TfLiteContext((Pointer)this).offsetAddress(i);
    }

  /** Number of tensors in the context. */
  
  ///
  ///
  ///
  ///
  ///
  ///
  public native @Cast("size_t") long tensors_size(); public native TfLiteContext tensors_size(long setter);

  /** The execution plan contains a list of the node indices in execution
   *  order. execution_plan->size is the current number of nodes, and
   *  execution_plan->data[0] is the first node that needs to be run.
   *  TfLiteDelegates can traverse the current execution plan by iterating
   *  through each member of this array and using GetNodeAndRegistration() to
   *  access details about a node, e.g.:
   * 
   * 
   *      TfLiteIntArray* execution_plan;
   *      TF_LITE_ENSURE_STATUS(context->GetExecutionPlan(context,
   *                                                      &execution_plan));
   *      for (int exec_index = 0; exec_index < execution_plan->size;
   *            exec_index++) {
   *         int node_index = execution_plan->data[exec_index];
   *         TfLiteNode* node;
   *         TfLiteRegistration* reg;
   *         context->GetNodeAndRegistration(context, node_index, &node, &reg);
   *      }
   * 
   *  Note: the memory pointed to by {@code *execution_plan} is OWNED by the
   *  TfLite runtime. Future calls to GetExecutionPlan invalidate earlier
   *  outputs. The following code snippet shows the issue with such an
   *  invocation pattern. After calling CheckNode, subsequent access to
   *  {@code plan_1st} is undefined.
   * 
   *      void CheckNode(const TfLiteNode* node) {
   *        ...
   *        TfLiteIntArray* plan_2nd;
   *        TF_LITE_ENSURE_STATUS(
   *            context->GetExecutionPlan(context, &plan_2nd)
   *        );
   *        ...
   *      }
   * 
   *      TfLiteIntArray* plan_1st;
   *      TF_LITE_ENSURE_STATUS(context->GetExecutionPlan(context, &plan_1st));
   *      for (int exec_index = 0; exec_index < plan_1st->size; exec_index++) {
   *         int node_index = plan_1st->data[exec_index];
   *         TfLiteNode* node;
   *         TfLiteRegistration* reg;
   *         context->GetNodeAndRegistration(context, node_index, &node, &reg);
   *         CheckNode(node);
   *      }
   * 
   *  WARNING: This is an experimental interface that is subject to change. */
  public static class GetExecutionPlan_TfLiteContext_PointerPointer extends FunctionPointer {
      static { Loader.load(); }
      /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
      public    GetExecutionPlan_TfLiteContext_PointerPointer(Pointer p) { super(p); }
      protected GetExecutionPlan_TfLiteContext_PointerPointer() { allocate(); }
      private native void allocate();
      public native @Cast("TfLiteStatus") int call(TfLiteContext context,
                                     @Cast("TfLiteIntArray**") PointerPointer execution_plan);
  }
  public native GetExecutionPlan_TfLiteContext_PointerPointer GetExecutionPlan(); public native TfLiteContext GetExecutionPlan(GetExecutionPlan_TfLiteContext_PointerPointer setter);
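  // Usage sketch (illustrative, not part of the generated bindings): given a
  // TfLiteContext "context" received in a delegate or custom-op callback, the
  // execution plan could be fetched and walked from Java roughly as follows,
  // assuming the generated TfLiteIntArray accessors size() and data(int):
  //
  //     PointerPointer planOut = new PointerPointer(1);
  //     if (context.GetExecutionPlan().call(context, planOut) == kTfLiteOk) {
  //         TfLiteIntArray plan = new TfLiteIntArray(planOut.get(0));
  //         for (int i = 0; i < plan.size(); i++) {
  //             int nodeIndex = plan.data(i); // node indices in execution order
  //         }
  //     }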

  /** An array of tensors in the interpreter context (of length {@code tensors_size}) */
  public native TfLiteTensor tensors(); public native TfLiteContext tensors(TfLiteTensor setter);
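  // Usage sketch (illustrative): tensors() points at the first element of an
  // array of length tensors_size(), so the i-th tensor can be reached by
  // offsetting the returned pointer, e.g.:
  //
  //     TfLiteTensor t = context.tensors().getPointer(i);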

  /** Opaque full context pointer (an opaque C++ data structure). */
  public native Pointer impl_(); public native TfLiteContext impl_(Pointer setter);

  /** Request that the memory pointer be resized. Updates dimensions on the
   *  tensor. NOTE: ResizeTensor takes ownership of {@code new_size}. */
  public static class ResizeTensor_TfLiteContext_TfLiteTensor_TfLiteIntArray extends FunctionPointer {
      static { Loader.load(); }
      /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
      public    ResizeTensor_TfLiteContext_TfLiteTensor_TfLiteIntArray(Pointer p) { super(p); }
      protected ResizeTensor_TfLiteContext_TfLiteTensor_TfLiteIntArray() { allocate(); }
      private native void allocate();
      public native @Cast("TfLiteStatus") int call(TfLiteContext arg0, TfLiteTensor tensor,
                                 TfLiteIntArray new_size);
  }
  public native ResizeTensor_TfLiteContext_TfLiteTensor_TfLiteIntArray ResizeTensor(); public native TfLiteContext ResizeTensor(ResizeTensor_TfLiteContext_TfLiteTensor_TfLiteIntArray setter);
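  // Usage sketch (illustrative): a Prepare-stage callback might resize an output
  // tensor "outputTensor" to shape {2, 3}. This assumes the TfLiteIntArrayCreate
  // helper from common.h and the TfLiteIntArray data(int, int) setter are mapped;
  // ResizeTensor takes ownership of the array, so it is not freed here:
  //
  //     TfLiteIntArray newSize = TfLiteIntArrayCreate(2);
  //     newSize.data(0, 2).data(1, 3);
  //     int status = context.ResizeTensor().call(context, outputTensor, newSize);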
  /** Request that an error be reported with format string msg. */
  

  /** Add {@code tensors_to_add} tensors, preserving pre-existing Tensor entries.  If
   *  non-null, the value pointed to by {@code first_new_tensor_index} will be set to
   *  the index of the first new tensor. */
  public static class AddTensors_TfLiteContext_int_IntPointer extends FunctionPointer {
      static { Loader.load(); }
      /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
      public    AddTensors_TfLiteContext_int_IntPointer(Pointer p) { super(p); }
      protected AddTensors_TfLiteContext_int_IntPointer() { allocate(); }
      private native void allocate();
      public native @Cast("TfLiteStatus") int call(TfLiteContext arg0, int tensors_to_add,
                               IntPointer first_new_tensor_index);
  }
  
  ///
  public native AddTensors_TfLiteContext_int_IntPointer AddTensors(); public native TfLiteContext AddTensors(AddTensors_TfLiteContext_int_IntPointer setter);
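  // Usage sketch (illustrative): add two temporary tensors and read back the
  // index of the first one through the IntPointer out-parameter:
  //
  //     IntPointer firstNew = new IntPointer(1);
  //     if (context.AddTensors().call(context, 2, firstNew) == kTfLiteOk) {
  //         int firstIndex = firstNew.get();
  //     }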

  /** Get a Tensor node by node_index.
   * 
   *  WARNING: This is an experimental interface that is subject to change. */
  public static class GetNodeAndRegistration_TfLiteContext_int_PointerPointer_PointerPointer extends FunctionPointer {
      static { Loader.load(); }
      /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
      public    GetNodeAndRegistration_TfLiteContext_int_PointerPointer_PointerPointer(Pointer p) { super(p); }
      protected GetNodeAndRegistration_TfLiteContext_int_PointerPointer_PointerPointer() { allocate(); }
      private native void allocate();
      public native @Cast("TfLiteStatus") int call(
        TfLiteContext arg0, int node_index, @Cast("TfLiteNode**") PointerPointer node,
        @Cast("TfLiteRegistration**") PointerPointer registration);
  }
  public native GetNodeAndRegistration_TfLiteContext_int_PointerPointer_PointerPointer GetNodeAndRegistration(); public native TfLiteContext GetNodeAndRegistration(GetNodeAndRegistration_TfLiteContext_int_PointerPointer_PointerPointer setter);
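  // Usage sketch (illustrative): the node and registration out-parameters come
  // back through PointerPointer and are wrapped with the pointer cast constructors:
  //
  //     PointerPointer nodeOut = new PointerPointer(1);
  //     PointerPointer regOut = new PointerPointer(1);
  //     if (context.GetNodeAndRegistration().call(context, nodeIndex, nodeOut, regOut) == kTfLiteOk) {
  //         TfLiteNode node = new TfLiteNode(nodeOut.get(0));
  //         TfLiteRegistration reg = new TfLiteRegistration(regOut.get(0));
  //     }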

  /** Replace ops with one or more stub delegate operations. This function
   *  does not take ownership of {@code nodes_to_replace}. */
  public static class ReplaceNodeSubsetsWithDelegateKernels_TfLiteContext_TfLiteRegistration_TfLiteIntArray_TfLiteDelegate extends FunctionPointer {
      static { Loader.load(); }
      /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
      public    ReplaceNodeSubsetsWithDelegateKernels_TfLiteContext_TfLiteRegistration_TfLiteIntArray_TfLiteDelegate(Pointer p) { super(p); }
      protected ReplaceNodeSubsetsWithDelegateKernels_TfLiteContext_TfLiteRegistration_TfLiteIntArray_TfLiteDelegate() { allocate(); }
      private native void allocate();
      public native @Cast("TfLiteStatus") int call(
        TfLiteContext arg0, @ByVal TfLiteRegistration registration,
        @Const TfLiteIntArray nodes_to_replace, TfLiteDelegate delegate);
  }
  public native ReplaceNodeSubsetsWithDelegateKernels_TfLiteContext_TfLiteRegistration_TfLiteIntArray_TfLiteDelegate ReplaceNodeSubsetsWithDelegateKernels(); public native TfLiteContext ReplaceNodeSubsetsWithDelegateKernels(ReplaceNodeSubsetsWithDelegateKernels_TfLiteContext_TfLiteRegistration_TfLiteIntArray_TfLiteDelegate setter);
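  // Usage sketch (illustrative): a delegate's Prepare callback, after collecting
  // the node indices it wants to take over in "nodesToReplace", hands them off
  // together with the registration of its delegate kernel; all names here are
  // placeholders:
  //
  //     int status = context.ReplaceNodeSubsetsWithDelegateKernels().call(
  //         context, delegateKernelRegistration, nodesToReplace, delegate);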

  /** Number of threads that are recommended to subsystems like gemmlowp and
   *  eigen. */
  
  ///
  public native int recommended_num_threads(); public native TfLiteContext recommended_num_threads(int setter);

  /** Access external contexts by type.
   * 
   *  WARNING: This is an experimental interface that is subject to change. */
  public static class GetExternalContext_TfLiteContext_int extends FunctionPointer {
      static { Loader.load(); }
      /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
      public    GetExternalContext_TfLiteContext_int(Pointer p) { super(p); }
      protected GetExternalContext_TfLiteContext_int() { allocate(); }
      private native void allocate();
      public native TfLiteExternalContext call(TfLiteContext arg0,
                                                 @Cast("TfLiteExternalContextType") int arg1);
  }
  
  ///
  public native GetExternalContext_TfLiteContext_int GetExternalContext(); public native TfLiteContext GetExternalContext(GetExternalContext_TfLiteContext_int setter);
  /** Set the value of an external context. Does not take ownership of the
   *  pointer.
   * 
   *  WARNING: This is an experimental interface that is subject to change. */
  public static class SetExternalContext_TfLiteContext_int_TfLiteExternalContext extends FunctionPointer {
      static { Loader.load(); }
      /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
      public    SetExternalContext_TfLiteContext_int_TfLiteExternalContext(Pointer p) { super(p); }
      protected SetExternalContext_TfLiteContext_int_TfLiteExternalContext() { allocate(); }
      private native void allocate();
      public native void call(TfLiteContext arg0, @Cast("TfLiteExternalContextType") int arg1,
                               TfLiteExternalContext arg2);
  }
  
  ///
  public native SetExternalContext_TfLiteContext_int_TfLiteExternalContext SetExternalContext(); public native TfLiteContext SetExternalContext(SetExternalContext_TfLiteContext_int_TfLiteExternalContext setter);
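  // Usage sketch (illustrative): external contexts are looked up and installed
  // by type, assuming the kTfLiteEigenContext constant of the
  // TfLiteExternalContextType enum is mapped in global.tensorflowlite:
  //
  //     TfLiteExternalContext eigenCtx = context.GetExternalContext().call(context, kTfLiteEigenContext);
  //     context.SetExternalContext().call(context, kTfLiteEigenContext, eigenCtx);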

  /** Flag for allowing float16 precision for FP32 calculation.
   *  Default: false.
   * 
   *  WARNING: This is an experimental API and subject to change. */
  public native @Cast("bool") boolean allow_fp32_relax_to_fp16(); public native TfLiteContext allow_fp32_relax_to_fp16(boolean setter);

  /** Pointer to the op-level profiler, if set; nullptr otherwise. */
  
  ///
  public native Pointer profiler(); public native TfLiteContext profiler(Pointer setter);

  /** Allocate a persistent buffer which has the same lifetime as the
   *  interpreter. Returns {@code nullptr} on failure. The memory is allocated
   *  from the heap for TFL, and from the tail in TFLM. This method is only
   *  available in the {@code Init} or {@code Prepare} stage.
   * 
   *  WARNING: This is an experimental interface that is subject
   *  to change. */
  public static class AllocatePersistentBuffer_TfLiteContext_long extends FunctionPointer {
      static { Loader.load(); }
      /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
      public    AllocatePersistentBuffer_TfLiteContext_long(Pointer p) { super(p); }
      protected AllocatePersistentBuffer_TfLiteContext_long() { allocate(); }
      private native void allocate();
      public native Pointer call(TfLiteContext ctx, @Cast("size_t") long bytes);
  }
  
  ///
  ///
  public native AllocatePersistentBuffer_TfLiteContext_long AllocatePersistentBuffer(); public native TfLiteContext AllocatePersistentBuffer(AllocatePersistentBuffer_TfLiteContext_long setter);
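  // Usage sketch (illustrative): an Init-stage callback might reserve a small
  // per-op data block that lives as long as the interpreter; a null return
  // signals allocation failure:
  //
  //     Pointer opData = context.AllocatePersistentBuffer().call(context, 64);
  //     if (opData == null) { /* allocation failed */ }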

  /** Allocate a buffer which will be deallocated right after the invoke phase.
   *  The memory is allocated from the heap in TFL, and from the volatile arena
   *  in TFLM. This method is only available in the invoke stage.
   * 
   *  NOTE: If possible use {@code RequestScratchBufferInArena} method to avoid memory
   *  allocation during inference time.
   * 
   *  WARNING: This is an experimental interface that is subject to change. */
  public static class AllocateBufferForEval_TfLiteContext_long_PointerPointer extends FunctionPointer {
      static { Loader.load(); }
      /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
      public    AllocateBufferForEval_TfLiteContext_long_PointerPointer(Pointer p) { super(p); }
      protected AllocateBufferForEval_TfLiteContext_long_PointerPointer() { allocate(); }
      private native void allocate();
      public native @Cast("TfLiteStatus") int call(TfLiteContext ctx, @Cast("size_t") long bytes,
                                          @Cast("void**") PointerPointer ptr);
  }
  
  ///
  public native AllocateBufferForEval_TfLiteContext_long_PointerPointer AllocateBufferForEval(); public native TfLiteContext AllocateBufferForEval(AllocateBufferForEval_TfLiteContext_long_PointerPointer setter);
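  // Usage sketch (illustrative): the eval-time buffer comes back through a
  // void** out-parameter, i.e. a PointerPointer on the Java side:
  //
  //     PointerPointer bufOut = new PointerPointer(1);
  //     if (context.AllocateBufferForEval().call(context, 1024, bufOut) == kTfLiteOk) {
  //         Pointer buf = bufOut.get(0);
  //     }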

  /** Request a scratch buffer in the arena through static memory planning.
   *  This method is only available in the {@code Prepare} stage, and the buffer
   *  is allocated by the interpreter between the Prepare and Eval stages. In
   *  the {@code Eval} stage, the {@code GetScratchBuffer} API can be used to
   *  fetch the address.
   * 
   *  WARNING: This is an experimental interface that is subject to change. */
  public static class RequestScratchBufferInArena_TfLiteContext_long_IntPointer extends FunctionPointer {
      static { Loader.load(); }
      /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
      public    RequestScratchBufferInArena_TfLiteContext_long_IntPointer(Pointer p) { super(p); }
      protected RequestScratchBufferInArena_TfLiteContext_long_IntPointer() { allocate(); }
      private native void allocate();
      public native @Cast("TfLiteStatus") int call(TfLiteContext ctx,
                                                @Cast("size_t") long bytes, IntPointer buffer_idx);
  }
  
  ///
  public native RequestScratchBufferInArena_TfLiteContext_long_IntPointer RequestScratchBufferInArena(); public native TfLiteContext RequestScratchBufferInArena(RequestScratchBufferInArena_TfLiteContext_long_IntPointer setter);

  /** Get the scratch buffer pointer.
   *  This method is only available in the Eval stage.
   * 
   *  WARNING: This is an experimental interface that is subject to change. */
  public static class GetScratchBuffer_TfLiteContext_int extends FunctionPointer {
      static { Loader.load(); }
      /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
      public    GetScratchBuffer_TfLiteContext_int(Pointer p) { super(p); }
      protected GetScratchBuffer_TfLiteContext_int() { allocate(); }
      private native void allocate();
      public native Pointer call(TfLiteContext ctx, int buffer_idx);
  }
  
  ///
  public native GetScratchBuffer_TfLiteContext_int GetScratchBuffer(); public native TfLiteContext GetScratchBuffer(GetScratchBuffer_TfLiteContext_int setter);
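  // Usage sketch (illustrative): the two calls are paired across stages. In
  // Prepare, request the buffer and keep the returned index; in Eval, resolve
  // the index to an address:
  //
  //     // Prepare stage:
  //     IntPointer bufferIdx = new IntPointer(1);
  //     context.RequestScratchBufferInArena().call(context, 4096, bufferIdx);
  //     // Eval stage:
  //     Pointer scratch = context.GetScratchBuffer().call(context, bufferIdx.get());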

  /** Resize the memory pointer of the {@code tensor}. This method behaves the same as
   *  {@code ResizeTensor}, except that it makes a copy of the shape array internally
   *  so the shape array can be deallocated right afterwards.
   * 
   *  WARNING: This is an experimental interface that is subject to change. */
  public static class ResizeTensorExplicit_TfLiteContext_TfLiteTensor_int_IntPointer extends FunctionPointer {
      static { Loader.load(); }
      /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
      public    ResizeTensorExplicit_TfLiteContext_TfLiteTensor_int_IntPointer(Pointer p) { super(p); }
      protected ResizeTensorExplicit_TfLiteContext_TfLiteTensor_int_IntPointer() { allocate(); }
      private native void allocate();
      public native @Cast("TfLiteStatus") int call(TfLiteContext ctx,
                                         TfLiteTensor tensor, int dims,
                                         @Const IntPointer shape);
  }
  
  ///
  ///
  ///
  public native ResizeTensorExplicit_TfLiteContext_TfLiteTensor_int_IntPointer ResizeTensorExplicit(); public native TfLiteContext ResizeTensorExplicit(ResizeTensorExplicit_TfLiteContext_TfLiteTensor_int_IntPointer setter);
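  // Usage sketch (illustrative): unlike ResizeTensor, the shape array is copied
  // internally, so a short-lived IntPointer is enough; "tensor" is a placeholder:
  //
  //     IntPointer shape = new IntPointer(2, 3); // dims = 2, shape = {2, 3}
  //     int status = context.ResizeTensorExplicit().call(context, tensor, 2, shape);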

  /** This method provides a preview of post-delegation partitioning. Each
   *  TfLiteDelegateParams in the referenced array corresponds to one instance
   *  of the delegate kernel. Example usage:
   * 
   *      TfLiteIntArray* nodes_to_replace = ...;
   *      TfLiteDelegateParams* params_array;
   *      int num_partitions = 0;
   *      TF_LITE_ENSURE_STATUS(context->PreviewDelegatePartitioning(
   *         context, delegate, nodes_to_replace, &params_array,
   *         &num_partitions));
   *      for (int idx = 0; idx < num_partitions; idx++) {
   *         const auto& partition_params = params_array[idx];
   *         ...
   *      }
   * 
   *  NOTE: The context owns the memory referenced by partition_params_array. It
   *  will be cleared with another call to PreviewDelegatePartitioning, or after
   *  TfLiteDelegateParams::Prepare returns.
   * 
   *  WARNING: This is an experimental interface that is subject to change. */
  public static class PreviewDelegatePartitioning_TfLiteContext_TfLiteIntArray_PointerPointer_IntPointer extends FunctionPointer {
      static { Loader.load(); }
      /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
      public    PreviewDelegatePartitioning_TfLiteContext_TfLiteIntArray_PointerPointer_IntPointer(Pointer p) { super(p); }
      protected PreviewDelegatePartitioning_TfLiteContext_TfLiteIntArray_PointerPointer_IntPointer() { allocate(); }
      private native void allocate();
      public native @Cast("TfLiteStatus") int call(
        TfLiteContext context, @Const TfLiteIntArray nodes_to_replace,
        @Cast("TfLiteDelegateParams**") PointerPointer partition_params_array, IntPointer num_partitions);
  }
  
  ///
  ///
  public native PreviewDelegatePartitioning_TfLiteContext_TfLiteIntArray_PointerPointer_IntPointer PreviewDelegatePartitioning(); public native TfLiteContext PreviewDelegatePartitioning(PreviewDelegatePartitioning_TfLiteContext_TfLiteIntArray_PointerPointer_IntPointer setter);
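  // Usage sketch (illustrative), mirroring the C example above with the JavaCPP
  // mappings; "nodesToReplace" is a placeholder TfLiteIntArray:
  //
  //     PointerPointer paramsOut = new PointerPointer(1);
  //     IntPointer numPartitions = new IntPointer(1);
  //     if (context.PreviewDelegatePartitioning().call(context, nodesToReplace,
  //             paramsOut, numPartitions) == kTfLiteOk) {
  //         TfLiteDelegateParams params = new TfLiteDelegateParams(paramsOut.get(0));
  //         for (int idx = 0; idx < numPartitions.get(); idx++) {
  //             TfLiteDelegateParams partition = params.getPointer(idx);
  //         }
  //     }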

  /** Returns a TfLiteTensor struct for a given index.
   * 
   *  WARNING: This is an experimental interface that is subject to change.
   * 
   *  WARNING: This method may not be available on all platforms. */
  public static class GetTensor_TfLiteContext_int extends FunctionPointer {
      static { Loader.load(); }
      /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
      public    GetTensor_TfLiteContext_int(Pointer p) { super(p); }
      protected GetTensor_TfLiteContext_int() { allocate(); }
      private native void allocate();
      public native TfLiteTensor call(@Const TfLiteContext context,
                               int tensor_idx);
  }
  
  ///
  ///
  public native GetTensor_TfLiteContext_int GetTensor(); public native TfLiteContext GetTensor(GetTensor_TfLiteContext_int setter);

  /** Returns a TfLiteEvalTensor struct for a given index.
   * 
   *  WARNING: This is an experimental interface that is subject to change.
   * 
   *  WARNING: This method may not be available on all platforms. */
  public static class GetEvalTensor_TfLiteContext_int extends FunctionPointer {
      static { Loader.load(); }
      /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
      public    GetEvalTensor_TfLiteContext_int(Pointer p) { super(p); }
      protected GetEvalTensor_TfLiteContext_int() { allocate(); }
      private native void allocate();
      public native TfLiteEvalTensor call(@Const TfLiteContext context,
                                       int tensor_idx);
  }
  
  ///
  public native GetEvalTensor_TfLiteContext_int GetEvalTensor(); public native TfLiteContext GetEvalTensor(GetEvalTensor_TfLiteContext_int setter);
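  // Usage sketch (illustrative): both lookups simply map a tensor index to the
  // corresponding struct, where these callbacks are provided by the platform:
  //
  //     TfLiteTensor full = context.GetTensor().call(context, tensorIdx);
  //     TfLiteEvalTensor eval = context.GetEvalTensor().call(context, tensorIdx);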

  /** Retrieves a named metadata buffer from the TFLite model.
   *  Returns kTfLiteOk if the metadata is successfully obtained from the
   *  flatbuffer model: that is, there exists a {@code metadata} entry with the
   *  given {@code name} string (see TFLite's schema.fbs).
   *  The corresponding buffer information is populated in {@code ptr} and {@code bytes}.
   *  The data from {@code ptr} is valid for the lifetime of the Interpreter.
   * 
   *  WARNING: This is an experimental interface that is subject to change. */
  public static class GetModelMetadata_TfLiteContext_BytePointer_PointerPointer_SizeTPointer extends FunctionPointer {
      static { Loader.load(); }
      /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
      public    GetModelMetadata_TfLiteContext_BytePointer_PointerPointer_SizeTPointer(Pointer p) { super(p); }
      protected GetModelMetadata_TfLiteContext_BytePointer_PointerPointer_SizeTPointer() { allocate(); }
      private native void allocate();
      public native @Cast("TfLiteStatus") int call(@Const TfLiteContext context,
                                     @Cast("const char*") BytePointer name, @Cast("const char**") PointerPointer ptr,
                                     @Cast("size_t*") SizeTPointer bytes);
  }
  
  ///
  ///
  public native GetModelMetadata_TfLiteContext_BytePointer_PointerPointer_SizeTPointer GetModelMetadata(); public native TfLiteContext GetModelMetadata(GetModelMetadata_TfLiteContext_BytePointer_PointerPointer_SizeTPointer setter);
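  // Usage sketch (illustrative): reading a hypothetical "model_description"
  // metadata entry; the returned data stays valid for the lifetime of the
  // Interpreter:
  //
  //     PointerPointer dataOut = new PointerPointer(1);
  //     SizeTPointer sizeOut = new SizeTPointer(1);
  //     if (context.GetModelMetadata().call(context, new BytePointer("model_description"),
  //             dataOut, sizeOut) == kTfLiteOk) {
  //         BytePointer data = new BytePointer(dataOut.get(0)).capacity(sizeOut.get());
  //     }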

  /** Retrieves the TfLiteContext of the subgraph that the given subgraph_index
   *  points to, and switches to the delegate context for that subgraph. If an
   *  invalid subgraph index is given, returns kTfLiteError.
   * 
   *  NOTE: This function is expected to be paired with ReleaseSubgraphContext()
   *  once the delegate preparation is done and/or the delegate context
   *  functions are no longer needed.
   * 
   *  WARNING: This is an experimental interface that is subject to change. */
  public static class AcquireSubgraphContext_TfLiteContext_int_PointerPointer extends FunctionPointer {
      static { Loader.load(); }
      /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
      public    AcquireSubgraphContext_TfLiteContext_int_PointerPointer(Pointer p) { super(p); }
      protected AcquireSubgraphContext_TfLiteContext_int_PointerPointer() { allocate(); }
      private native void allocate();
      public native @Cast("TfLiteStatus") int call(
        TfLiteContext context, int subgraph_index,
        @Cast("TfLiteContext**") PointerPointer acquired_context);
  }
  
  ///
  ///
  public native AcquireSubgraphContext_TfLiteContext_int_PointerPointer AcquireSubgraphContext(); public native TfLiteContext AcquireSubgraphContext(AcquireSubgraphContext_TfLiteContext_int_PointerPointer setter);
  /** Releases the subgraph context by switching back to the TFLite kernel
   *  context for the subgraph that the given subgraph_index points to.
   * 
   *  NOTE: This function is expected to be used after AcquireSubgraphContext()
   *  once the delegate preparation is done and/or the delegate context
   *  functions are no longer needed.
   * 
   *  WARNING: This is an experimental interface that is subject to change. */
  public static class ReleaseSubgraphContext_TfLiteContext_int extends FunctionPointer {
      static { Loader.load(); }
      /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
      public    ReleaseSubgraphContext_TfLiteContext_int(Pointer p) { super(p); }
      protected ReleaseSubgraphContext_TfLiteContext_int() { allocate(); }
      private native void allocate();
      public native @Cast("TfLiteStatus") int call(TfLiteContext context,
                                           int subgraph_index);
  }
  public native ReleaseSubgraphContext_TfLiteContext_int ReleaseSubgraphContext(); public native TfLiteContext ReleaseSubgraphContext(ReleaseSubgraphContext_TfLiteContext_int setter);
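  // Usage sketch (illustrative): the two calls are expected to bracket delegate
  // work on a subgraph; "subgraphIndex" is a placeholder:
  //
  //     PointerPointer acquired = new PointerPointer(1);
  //     if (context.AcquireSubgraphContext().call(context, subgraphIndex, acquired) == kTfLiteOk) {
  //         TfLiteContext subgraphContext = new TfLiteContext(acquired.get(0));
  //         // ... use subgraphContext while preparing the delegate ...
  //         context.ReleaseSubgraphContext().call(context, subgraphIndex);
  //     }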
}