// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
import java.nio.*;
import org.bytedeco.javacpp.*;
import org.bytedeco.javacpp.annotation.*;
import static org.bytedeco.javacpp.presets.javacpp.*;
import org.bytedeco.cuda.cudart.*;
import static org.bytedeco.cuda.global.cudart.*;
import org.bytedeco.cuda.cublas.*;
import static org.bytedeco.cuda.global.cublas.*;
import org.bytedeco.cuda.cudnn.*;
import static org.bytedeco.cuda.global.cudnn.*;
import org.bytedeco.cuda.nvrtc.*;
import static org.bytedeco.cuda.global.nvrtc.*;
import static org.bytedeco.tensorrt.global.nvinfer.*;
/**
* \class ITimingCache
*
* \brief Class to handle tactic timing info collected from builder.
*
* The timing cache is created or initialized by IBuilderConfig. It can be shared across builder instances
* to reduce the builder wall-clock time (see the usage sketch below).
*
* \warning It is a known issue that the same timing cache doesn't guarantee stable engine build reproducibility
* at optimization level 4 and higher. This issue will be fixed by 2024.
*
* @see IBuilderConfig
* */
@Namespace("nvinfer1") @NoOffset @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class)
public class ITimingCache extends INoCopy {
static { Loader.load(); }
/** Default native constructor. */
public ITimingCache() { super((Pointer)null); allocate(); }
/** Native array allocator. Access with {@link Pointer#position(long)}. */
public ITimingCache(long size) { super((Pointer)null); allocateArray(size); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public ITimingCache(Pointer p) { super(p); }
private native void allocate();
private native void allocateArray(long size);
@Override public ITimingCache position(long position) {
return (ITimingCache)super.position(position);
}
@Override public ITimingCache getPointer(long i) {
return new ITimingCache((Pointer)this).offsetAddress(i);
}
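// Hedged usage sketch (illustrative only, not part of the generated bindings): create an empty
// timing cache from an IBuilderConfig and attach it, so that tactic timings measured during one
// engine build can be reused by later builds that share the same cache object. createTimingCache()
// and setTimingCache() are assumed here to mirror the C++ IBuilderConfig API.
private static ITimingCache attachFreshTimingCache(IBuilderConfig config) {
    // A null blob of size 0 requests an empty cache, mirroring the C++ call createTimingCache(nullptr, 0).
    ITimingCache cache = config.createTimingCache((Pointer)null, 0);
    if (cache != null) {
        // ignoreMismatch = false: reject caches recorded on a device with different properties.
        config.setTimingCache(cache, false);
    }
    return cache;
}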
/**
* \brief Serialize a timing cache to an IHostMemory object.
*
* This function allows serialization of the current timing cache.
*
* @return A pointer to an IHostMemory object that contains the serialized timing cache.
*
* @see IHostMemory
* */
//!
//!
//!
//!
//!
//!
//!
public native @NoException(true) IHostMemory serialize();
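// Hedged sketch (illustrative only, not part of the generated bindings): persist the serialized
// cache to a file so a later build can reload it. IHostMemory is assumed to expose data() and
// size() mirroring the C++ API; the target path is supplied by the caller.
private static void writeTimingCache(ITimingCache cache, java.nio.file.Path target) throws java.io.IOException {
    IHostMemory blob = cache.serialize();
    byte[] bytes = new byte[(int)blob.size()];
    // Copy the native blob into a Java array before writing it out.
    new BytePointer(blob.data()).get(bytes);
    java.nio.file.Files.write(target, bytes);
}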
/**
* \brief Combine an input timing cache into the local instance.
*
* This function allows combining entries from the input timing cache into the local cache object.
*
* @param inputCache The input timing cache.
* @param ignoreMismatch Whether or not to allow cache verification header mismatch.
*
* @return True if combined successfully, false otherwise.
*
* Appends entries from the input cache to the local cache. Conflicting entries are skipped.
* The input cache must have been generated by a TensorRT build of the exact same version;
* otherwise, the combine is skipped and false is returned.
* ignoreMismatch must be set to true when combining a timing cache created on a
* different device.
*
* \warning Combining caches generated from devices with different device properties may
* lead to functional/performance bugs!
* */
//!
//!
//!
public native @Cast("bool") @NoException(true) boolean combine(@Const @ByRef ITimingCache inputCache, @Cast("bool") boolean ignoreMismatch);
/**
* \brief Empty the timing cache.
*
* @return True if reset successfully, false otherwise.
* */
public native @Cast("bool") @NoException(true) boolean reset();
}