// Targeted by JavaCPP version 1.4: DO NOT EDIT THIS FILE
package org.bytedeco.javacpp;
import java.nio.*;
import org.bytedeco.javacpp.*;
import org.bytedeco.javacpp.annotation.*;
import static org.bytedeco.javacpp.opencv_core.*;
import static org.bytedeco.javacpp.opencv_imgproc.*;
public class opencv_dnn extends org.bytedeco.javacpp.presets.opencv_dnn {
static { Loader.load(); }
@Name("std::vector") public static class MatShapeVector extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public MatShapeVector(Pointer p) { super(p); }
public MatShapeVector(IntPointer value) { this(1); put(0, value); }
public MatShapeVector(IntPointer ... array) { this(array.length); put(array); }
public MatShapeVector() { allocate(); }
public MatShapeVector(long n) { allocate(n); }
private native void allocate();
private native void allocate(@Cast("size_t") long n);
public native @Name("operator=") @ByRef MatShapeVector put(@ByRef MatShapeVector x);
public boolean empty() { return size() == 0; }
public native long size();
public void clear() { resize(0); }
public native void resize(@Cast("size_t") long n);
@Index public native @StdVector IntPointer get(@Cast("size_t") long i);
public native MatShapeVector put(@Cast("size_t") long i, IntPointer value);
public native @ByVal Iterator begin();
public native @ByVal Iterator end();
@NoOffset @Name("iterator") public static class Iterator extends Pointer {
public Iterator(Pointer p) { super(p); }
public Iterator() { }
public native @Name("operator++") @ByRef Iterator increment();
public native @Name("operator==") boolean equals(@ByRef Iterator it);
public native @Name("operator*") @StdVector IntPointer get();
}
public IntPointer pop_back() {
long size = size();
IntPointer value = get(size - 1);
resize(size - 1);
return value;
}
public MatShapeVector push_back(IntPointer value) {
long size = size();
resize(size + 1);
return put(size, value);
}
public MatShapeVector put(IntPointer value) {
if (size() != 1) { resize(1); }
return put(0, value);
}
public MatShapeVector put(IntPointer ... array) {
if (size() != array.length) { resize(array.length); }
for (int i = 0; i < array.length; i++) {
put(i, array[i]);
}
return this;
}
}
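// Editor's note: a usage sketch, not emitted by JavaCPP; the shape values are illustrative.
// A MatShape maps to an IntPointer of dimension sizes; MatShapeVector collects several of them.
public static MatShapeVector exampleMatShapeVector() {
IntPointer shape = new IntPointer(1, 3, 224, 224); // N, C, H, W
MatShapeVector shapes = new MatShapeVector(shape); // vector holding one shape
shapes.push_back(new IntPointer(1, 1000)); // append a second shape
return shapes;
}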
@Name("std::vector >") public static class MatShapeVectorVector extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public MatShapeVectorVector(Pointer p) { super(p); }
public MatShapeVectorVector(MatShapeVector value) { this(1); put(0, value); }
public MatShapeVectorVector(MatShapeVector ... array) { this(array.length); put(array); }
public MatShapeVectorVector() { allocate(); }
public MatShapeVectorVector(long n) { allocate(n); }
private native void allocate();
private native void allocate(@Cast("size_t") long n);
public native @Name("operator=") @ByRef MatShapeVectorVector put(@ByRef MatShapeVectorVector x);
public boolean empty() { return size() == 0; }
public native long size();
public void clear() { resize(0); }
public native void resize(@Cast("size_t") long n);
@Index public native @ByRef MatShapeVector get(@Cast("size_t") long i);
public native MatShapeVectorVector put(@Cast("size_t") long i, MatShapeVector value);
public native @ByVal Iterator begin();
public native @ByVal Iterator end();
@NoOffset @Name("iterator") public static class Iterator extends Pointer {
public Iterator(Pointer p) { super(p); }
public Iterator() { }
public native @Name("operator++") @ByRef Iterator increment();
public native @Name("operator==") boolean equals(@ByRef Iterator it);
public native @Name("operator*") @ByRef MatShapeVector get();
}
public MatShapeVector pop_back() {
long size = size();
MatShapeVector value = get(size - 1);
resize(size - 1);
return value;
}
public MatShapeVectorVector push_back(MatShapeVector value) {
long size = size();
resize(size + 1);
return put(size, value);
}
public MatShapeVectorVector put(MatShapeVector value) {
if (size() != 1) { resize(1); }
return put(0, value);
}
public MatShapeVectorVector put(MatShapeVector ... array) {
if (size() != array.length) { resize(array.length); }
for (int i = 0; i < array.length; i++) {
put(i, array[i]);
}
return this;
}
}
@Name("std::vector >") public static class RangeVectorVector extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public RangeVectorVector(Pointer p) { super(p); }
public RangeVectorVector(Range[] ... array) { this(array.length); put(array); }
public RangeVectorVector() { allocate(); }
public RangeVectorVector(long n) { allocate(n); }
private native void allocate();
private native void allocate(@Cast("size_t") long n);
public native @Name("operator=") @ByRef RangeVectorVector put(@ByRef RangeVectorVector x);
public boolean empty() { return size() == 0; }
public native long size();
public void clear() { resize(0); }
public native void resize(@Cast("size_t") long n);
public boolean empty(@Cast("size_t") long i) { return size(i) == 0; }
public native @Index long size(@Cast("size_t") long i);
public void clear(@Cast("size_t") long i) { resize(i, 0); }
public native @Index void resize(@Cast("size_t") long i, @Cast("size_t") long n);
@Index public native @ByRef Range get(@Cast("size_t") long i, @Cast("size_t") long j);
public native RangeVectorVector put(@Cast("size_t") long i, @Cast("size_t") long j, Range value);
public RangeVectorVector put(Range[] ... array) {
if (size() != array.length) { resize(array.length); }
for (int i = 0; i < array.length; i++) {
if (size(i) != array[i].length) { resize(i, array[i].length); }
for (int j = 0; j < array[i].length; j++) {
put(i, j, array[i][j]);
}
}
return this;
}
}
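// Editor's note: a usage sketch, not emitted by JavaCPP. SliceLayer.sliceRanges() takes this
// nested vector: the outer index selects the output blob, the inner index the input dimension
// being sliced. The range values below are illustrative.
public static RangeVectorVector exampleRangeVectorVector() {
Range[] first = { new Range(0, 2), new Range(0, 100) }; // output 0: rows 0..1, cols 0..99
Range[] second = { new Range(2, 4), new Range(0, 100) }; // output 1: rows 2..3, cols 0..99
return new RangeVectorVector(first, second); // uses the Range[]... constructor above
}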
@Name("std::vector") public static class MatPointerVector extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public MatPointerVector(Pointer p) { super(p); }
public MatPointerVector(Mat value) { this(1); put(0, value); }
public MatPointerVector(Mat ... array) { this(array.length); put(array); }
public MatPointerVector() { allocate(); }
public MatPointerVector(long n) { allocate(n); }
private native void allocate();
private native void allocate(@Cast("size_t") long n);
public native @Name("operator=") @ByRef MatPointerVector put(@ByRef MatPointerVector x);
public boolean empty() { return size() == 0; }
public native long size();
public void clear() { resize(0); }
public native void resize(@Cast("size_t") long n);
@Index public native Mat get(@Cast("size_t") long i);
public native MatPointerVector put(@Cast("size_t") long i, Mat value);
public native @ByVal Iterator begin();
public native @ByVal Iterator end();
@NoOffset @Name("iterator") public static class Iterator extends Pointer {
public Iterator(Pointer p) { super(p); }
public Iterator() { }
public native @Name("operator++") @ByRef Iterator increment();
public native @Name("operator==") boolean equals(@ByRef Iterator it);
public native @Name("operator*") Mat get();
}
public Mat pop_back() {
long size = size();
Mat value = get(size - 1);
resize(size - 1);
return value;
}
public MatPointerVector push_back(Mat value) {
long size = size();
resize(size + 1);
return put(size, value);
}
public MatPointerVector put(Mat value) {
if (size() != 1) { resize(1); }
return put(0, value);
}
public MatPointerVector put(Mat ... array) {
if (size() != array.length) { resize(array.length); }
for (int i = 0; i < array.length; i++) {
put(i, array[i]);
}
return this;
}
}
// Parsed from <opencv2/dnn.hpp>
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
// #ifndef OPENCV_DNN_HPP
// #define OPENCV_DNN_HPP
// This is an umbrella header to include into your project.
// We are free to change the headers layout in the dnn subfolder, so please include
// this header for future compatibility.
/** \defgroup dnn Deep Neural Network module
\{
This module contains:
- API for new layer creation; layers are the building bricks of neural networks;
- a set of the most useful built-in Layers;
- API to construct and modify comprehensive neural networks from layers;
- functionality for loading serialized network models from different frameworks.
Functionality of this module is designed only for forward pass computations (i.e. network testing).
Network training is in principle not supported.
\}
*/
// #include
// #endif /* OPENCV_DNN_HPP */
// Parsed from <opencv2/dnn/dict.hpp>
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
// #include
// #include
// #include
// #include
// #ifndef OPENCV_DNN_DNN_DICT_HPP
// #define OPENCV_DNN_DNN_DICT_HPP
@Namespace("cv::dnn") @NoOffset public static class DictValue extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public DictValue(Pointer p) { super(p); }
public DictValue(@Const @ByRef DictValue r) { super((Pointer)null); allocate(r); }
private native void allocate(@Const @ByRef DictValue r);
/** Constructs integer scalar */
public DictValue(@Cast("int64") long i/*=0*/) { super((Pointer)null); allocate(i); }
private native void allocate(@Cast("int64") long i/*=0*/);
public DictValue() { super((Pointer)null); allocate(); }
private native void allocate();
/** Constructs integer scalar */
public DictValue(int i) { super((Pointer)null); allocate(i); }
private native void allocate(int i);
/** Constructs floating point scalar */
public DictValue(double p) { super((Pointer)null); allocate(p); }
private native void allocate(double p);
/** Constructs string scalar */
public DictValue(@Str BytePointer s) { super((Pointer)null); allocate(s); }
private native void allocate(@Str BytePointer s);
public DictValue(@Str String s) { super((Pointer)null); allocate(s); }
private native void allocate(@Str String s);
public native int size();
public native @Cast("bool") boolean isInt();
public native @Cast("bool") boolean isString();
public native @Cast("bool") boolean isReal();
public native int getIntValue(int idx/*=-1*/);
public native int getIntValue();
public native double getRealValue(int idx/*=-1*/);
public native double getRealValue();
public native @Str BytePointer getStringValue(int idx/*=-1*/);
public native @Str BytePointer getStringValue();
public native @ByRef @Name("operator =") DictValue put(@Const @ByRef DictValue r);
}
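// Editor's note: a usage sketch, not emitted by JavaCPP. A DictValue wraps a scalar that may
// be an integer, a real or a string; the is*()/get*Value() pairs inspect and extract it.
public static void exampleDictValue() {
DictValue v = new DictValue(3.5); // floating point scalar
if (v.isReal()) {
System.out.println(v.getRealValue()); // prints 3.5
}
}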
/** \brief This class implements a name-value dictionary; values are instances of DictValue. */
@Namespace("cv::dnn") public static class Dict extends Pointer {
static { Loader.load(); }
/** Default native constructor. */
public Dict() { super((Pointer)null); allocate(); }
/** Native array allocator. Access with {@link Pointer#position(long)}. */
public Dict(long size) { super((Pointer)null); allocateArray(size); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public Dict(Pointer p) { super(p); }
private native void allocate();
private native void allocateArray(long size);
@Override public Dict position(long position) {
return (Dict)super.position(position);
}
/** Checks the presence of the \p key in the dictionary. */
public native @Cast("bool") boolean has(@Str BytePointer key);
public native @Cast("bool") boolean has(@Str String key);
/** If the \p key is in the dictionary, returns a pointer to its value; otherwise returns NULL. */
public native DictValue ptr(@Str BytePointer key);
public native DictValue ptr(@Str String key);
/** \overload */
/** If the \p key is in the dictionary, returns its value; otherwise an error will be generated. */
public native @Const @ByRef DictValue get(@Str BytePointer key);
public native @Const @ByRef DictValue get(@Str String key);
/** \overload */
/** If the \p key is in the dictionary, returns its value; otherwise returns \p defaultValue. */
/** Sets new \p value for the \p key, or adds new key-value pair into the dictionary. */
}
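// Editor's note: a usage sketch, not emitted by JavaCPP; "kernel_size" is a hypothetical key.
// get() raises an error for missing keys, so guard with has(), or use ptr(), which returns
// null instead.
public static void exampleDict(Dict d) {
if (d.has("kernel_size")) { // hypothetical key name
DictValue v = d.get("kernel_size");
System.out.println(v.getIntValue());
}
}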
/** \} */
// #endif
// Parsed from <opencv2/dnn/all_layers.hpp>
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
// #ifndef OPENCV_DNN_DNN_ALL_LAYERS_HPP
// #define OPENCV_DNN_DNN_ALL_LAYERS_HPP
// #include
@Namespace("cv::dnn") public static class BlankLayer extends Layer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public BlankLayer(Pointer p) { super(p); }
public static native @Ptr Layer create(@Const @ByRef LayerParams params);
}
/** LSTM recurrent layer */
@Namespace("cv::dnn") public static class LSTMLayer extends Layer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public LSTMLayer(Pointer p) { super(p); }
/** Creates instance of LSTM layer */
public static native @Ptr LSTMLayer create(@Const @ByRef LayerParams params);
/** @deprecated Use LayerParams::blobs instead.
\brief Set trained weights for LSTM layer.
LSTM behavior on each step is defined by current input, previous output, previous cell state and learned weights.
Let \f$x_t\f$ be current input, \f$h_t\f$ be current output, \f$c_t\f$ be current state.
Then the current output and the current cell state are computed as follows:
\f{eqnarray*}{
h_t &= o_t \odot tanh(c_t), \\
c_t &= f_t \odot c_{t-1} + i_t \odot g_t, \\
\f}
where \f$\odot\f$ is a per-element multiplication operation and \f$i_t, f_t, o_t, g_t\f$ are internal gates that are computed using learned weights.
Gates are computed as follows:
\f{eqnarray*}{
i_t &= sigmoid&(W_{xi} x_t + W_{hi} h_{t-1} + b_i), \\
f_t &= sigmoid&(W_{xf} x_t + W_{hf} h_{t-1} + b_f), \\
o_t &= sigmoid&(W_{xo} x_t + W_{ho} h_{t-1} + b_o), \\
g_t &= tanh &(W_{xg} x_t + W_{hg} h_{t-1} + b_g), \\
\f}
where \f$W_{x?}\f$, \f$W_{h?}\f$ and \f$b_{?}\f$ are learned weights represented as matrices:
\f$W_{x?} \in R^{N_h \times N_x}\f$, \f$W_{h?} \in R^{N_h \times N_h}\f$, \f$b_? \in R^{N_h}\f$.
For simplicity and performance purposes we use \f$ W_x = [W_{xi}; W_{xf}; W_{xo}; W_{xg}] \f$
(i.e. \f$W_x\f$ is the vertical concatenation of \f$ W_{x?} \f$), \f$ W_x \in R^{4N_h \times N_x} \f$.
The same for \f$ W_h = [W_{hi}; W_{hf}; W_{ho}; W_{hg}], W_h \in R^{4N_h \times N_h} \f$
and for \f$ b = [b_i; b_f; b_o; b_g]\f$, \f$b \in R^{4N_h} \f$.
@param Wh is the matrix defining how the previous output is transformed to internal gates (i.e. \f$ W_h \f$ in the above-mentioned notation)
@param Wx is the matrix defining how the current input is transformed to internal gates (i.e. \f$ W_x \f$ in the above-mentioned notation)
@param b is the bias vector (i.e. \f$ b \f$ in the above-mentioned notation)
*/
public native void setWeights(@Const @ByRef Mat Wh, @Const @ByRef Mat Wx, @Const @ByRef Mat b);
/** \brief Specifies shape of output blob which will be [[{@code T}], {@code N}] + \p outTailShape.
* \details If this parameter is empty or unset then \p outTailShape = [{@code Wh}.size(0)] will be used,
* where {@code Wh} is parameter from setWeights().
*/
public native void setOutShape(@Const @StdVector @ByRef(nullValue = "cv::dnn::MatShape()") IntPointer outTailShape);
public native void setOutShape();
/** @deprecated Use flag {@code use_timestamp_dim} in LayerParams.
* \brief Specifies whether the first dimension of the input blob is interpreted as the timestamp dimension or as the sample dimension.
*
* If the flag is set to true then the shape of the input blob will be interpreted as [{@code T}, {@code N}, {@code [data dims]}] where {@code T} specifies the number of timestamps and {@code N} is the number of independent streams.
* In this case each forward() call will iterate through {@code T} timestamps and update the layer's state {@code T} times.
*
* If the flag is set to false then the shape of the input blob will be interpreted as [{@code N}, {@code [data dims]}].
* In this case each forward() call will make one iteration and produce one timestamp with shape [{@code N}, {@code [out dims]}].
*/
public native void setUseTimstampsDim(@Cast("bool") boolean use/*=true*/);
public native void setUseTimstampsDim();
/** @deprecated Use flag {@code produce_cell_output} in LayerParams.
* \brief If this flag is set to true then layer will produce \f$ c_t \f$ as second output.
* \details Shape of the second output is the same as first output.
*/
public native void setProduceCellOutput(@Cast("bool") boolean produce/*=false*/);
public native void setProduceCellOutput();
/* In the common case it uses a single input with @f$x_t@f$ values to compute the output(s) @f$h_t@f$ (and @f$c_t@f$).
* @param input should contain packed values @f$x_t@f$
* @param output contains computed outputs: @f$h_t@f$ (and @f$c_t@f$ if the setProduceCellOutput() flag was set to true).
*
* If setUseTimstampsDim() is set to true then @p input[0] should have at least two dimensions with the following shape: [`T`, `N`, `[data dims]`],
* where `T` specifies the number of timestamps and `N` is the number of independent streams (i.e. @f$ x_{t_0 + t}^{stream} @f$ is stored inside @p input[0][t, stream, ...]).
*
* If setUseTimstampsDim() is set to false then @p input[0] should contain a single timestamp; its shape should have the form [`N`, `[data dims]`] with at least one dimension.
* (i.e. @f$ x_{t}^{stream} @f$ is stored inside @p input[0][stream, ...]).
*/
public native int inputNameToIndex(@Str BytePointer inputName);
public native int inputNameToIndex(@Str String inputName);
public native int outputNameToIndex(@Str BytePointer outputName);
public native int outputNameToIndex(@Str String outputName);
}
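// Editor's note: a usage sketch, not emitted by JavaCPP. Wh, Wx and b are assumed to follow
// the concatenated layout described above (4*Nh rows each).
public static LSTMLayer exampleLstm(Mat Wh, Mat Wx, Mat b) {
LSTMLayer lstm = LSTMLayer.create(new LayerParams());
lstm.setWeights(Wh, Wx, b); // deprecated; LayerParams::blobs is the preferred route
lstm.setProduceCellOutput(true); // also emit c_t as a second output
return lstm;
}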
/** \brief Classical recurrent layer
Accepts two inputs \f$x_t\f$ and \f$h_{t-1}\f$ and computes two outputs \f$o_t\f$ and \f$h_t\f$.
- input: should contain packed input \f$x_t\f$.
- output: should contain output \f$o_t\f$ (and \f$h_t\f$ if setProduceHiddenOutput() is set to true).
input[0] should have shape [{@code T}, {@code N}, {@code data_dims}] where {@code T} and {@code N} are the number of timestamps and the number of independent samples of \f$x_t\f$ respectively.
output[0] will have shape [{@code T}, {@code N}, \f$N_o\f$], where \f$N_o\f$ is the number of rows in the \f$ W_{xo} \f$ matrix.
If setProduceHiddenOutput() is set to true then \p output[1] will contain a Mat with shape [{@code T}, {@code N}, \f$N_h\f$], where \f$N_h\f$ is the number of rows in the \f$ W_{hh} \f$ matrix.
*/
@Namespace("cv::dnn") public static class RNNLayer extends Layer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public RNNLayer(Pointer p) { super(p); }
/** Creates instance of RNNLayer */
public static native @Ptr RNNLayer create(@Const @ByRef LayerParams params);
/** Sets up learned weights.
Recurrent-layer behavior on each step is defined by current input \f$ x_t \f$, previous state \f$ h_t \f$ and learned weights as follows:
\f{eqnarray*}{
h_t &= tanh&(W_{hh} h_{t-1} + W_{xh} x_t + b_h), \\
o_t &= tanh&(W_{ho} h_t + b_o),
\f}
@param Wxh is \f$ W_{xh} \f$ matrix
@param bh is \f$ b_{h} \f$ vector
@param Whh is \f$ W_{hh} \f$ matrix
@param Who is \f$ W_{ho} \f$ matrix
@param bo is \f$ b_{o} \f$ vector
*/
public native void setWeights(@Const @ByRef Mat Wxh, @Const @ByRef Mat bh, @Const @ByRef Mat Whh, @Const @ByRef Mat Who, @Const @ByRef Mat bo);
/** \brief If this flag is set to true then layer will produce \f$ h_t \f$ as second output.
* \details Shape of the second output is the same as first output.
*/
public native void setProduceHiddenOutput(@Cast("bool") boolean produce/*=false*/);
public native void setProduceHiddenOutput();
}
@Namespace("cv::dnn") @NoOffset public static class BaseConvolutionLayer extends Layer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public BaseConvolutionLayer(Pointer p) { super(p); }
public native @ByRef Size kernel(); public native BaseConvolutionLayer kernel(Size kernel);
public native @ByRef Size stride(); public native BaseConvolutionLayer stride(Size stride);
public native @ByRef Size pad(); public native BaseConvolutionLayer pad(Size pad);
public native @ByRef Size dilation(); public native BaseConvolutionLayer dilation(Size dilation);
public native @ByRef Size adjustPad(); public native BaseConvolutionLayer adjustPad(Size adjustPad);
public native @Str BytePointer padMode(); public native BaseConvolutionLayer padMode(BytePointer padMode);
public native int numOutput(); public native BaseConvolutionLayer numOutput(int numOutput);
}
@Namespace("cv::dnn") public static class ConvolutionLayer extends BaseConvolutionLayer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public ConvolutionLayer(Pointer p) { super(p); }
public static native @Ptr BaseConvolutionLayer create(@Const @ByRef LayerParams params);
}
@Namespace("cv::dnn") public static class DeconvolutionLayer extends BaseConvolutionLayer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public DeconvolutionLayer(Pointer p) { super(p); }
public static native @Ptr BaseConvolutionLayer create(@Const @ByRef LayerParams params);
}
@Namespace("cv::dnn") @NoOffset public static class LRNLayer extends Layer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public LRNLayer(Pointer p) { super(p); }
public native @Name("type") int lrnType(); public native LRNLayer lrnType(int lrnType);
public native int size(); public native LRNLayer size(int size);
public native float alpha(); public native LRNLayer alpha(float alpha);
public native float beta(); public native LRNLayer beta(float beta);
public native float bias(); public native LRNLayer bias(float bias);
public native @Cast("bool") boolean normBySize(); public native LRNLayer normBySize(boolean normBySize);
public static native @Ptr LRNLayer create(@Const @ByRef LayerParams params);
}
@Namespace("cv::dnn") @NoOffset public static class PoolingLayer extends Layer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public PoolingLayer(Pointer p) { super(p); }
public native @Name("type") int poolingType(); public native PoolingLayer poolingType(int poolingType);
public native @ByRef Size kernel(); public native PoolingLayer kernel(Size kernel);
public native @ByRef Size stride(); public native PoolingLayer stride(Size stride);
public native @ByRef Size pad(); public native PoolingLayer pad(Size pad);
public native @Cast("bool") boolean globalPooling(); public native PoolingLayer globalPooling(boolean globalPooling);
public native @Cast("bool") boolean computeMaxIdx(); public native PoolingLayer computeMaxIdx(boolean computeMaxIdx);
public native @Str BytePointer padMode(); public native PoolingLayer padMode(BytePointer padMode);
public native @Cast("bool") boolean ceilMode(); public native PoolingLayer ceilMode(boolean ceilMode);
// ROIPooling parameters.
public native @ByRef Size pooledSize(); public native PoolingLayer pooledSize(Size pooledSize);
public native float spatialScale(); public native PoolingLayer spatialScale(float spatialScale);
// PSROIPooling parameters.
public native int psRoiOutChannels(); public native PoolingLayer psRoiOutChannels(int psRoiOutChannels);
public static native @Ptr PoolingLayer create(@Const @ByRef LayerParams params);
}
@Namespace("cv::dnn") @NoOffset public static class SoftmaxLayer extends Layer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public SoftmaxLayer(Pointer p) { super(p); }
public native @Cast("bool") boolean logSoftMax(); public native SoftmaxLayer logSoftMax(boolean logSoftMax);
public static native @Ptr SoftmaxLayer create(@Const @ByRef LayerParams params);
}
@Namespace("cv::dnn") @NoOffset public static class InnerProductLayer extends Layer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public InnerProductLayer(Pointer p) { super(p); }
public native int axis(); public native InnerProductLayer axis(int axis);
public static native @Ptr InnerProductLayer create(@Const @ByRef LayerParams params);
}
@Namespace("cv::dnn") @NoOffset public static class MVNLayer extends Layer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public MVNLayer(Pointer p) { super(p); }
public native float eps(); public native MVNLayer eps(float eps);
public native @Cast("bool") boolean normVariance(); public native MVNLayer normVariance(boolean normVariance);
public native @Cast("bool") boolean acrossChannels(); public native MVNLayer acrossChannels(boolean acrossChannels);
public static native @Ptr MVNLayer create(@Const @ByRef LayerParams params);
}
/* Reshaping */
@Namespace("cv::dnn") @NoOffset public static class ReshapeLayer extends Layer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public ReshapeLayer(Pointer p) { super(p); }
public native @StdVector @ByRef IntPointer newShapeDesc(); public native ReshapeLayer newShapeDesc(IntPointer newShapeDesc);
public native @ByRef Range newShapeRange(); public native ReshapeLayer newShapeRange(Range newShapeRange);
public static native @Ptr ReshapeLayer create(@Const @ByRef LayerParams params);
}
@Namespace("cv::dnn") public static class FlattenLayer extends Layer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public FlattenLayer(Pointer p) { super(p); }
public static native @Ptr FlattenLayer create(@Const @ByRef LayerParams params);
}
@Namespace("cv::dnn") @NoOffset public static class ConcatLayer extends Layer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public ConcatLayer(Pointer p) { super(p); }
public native int axis(); public native ConcatLayer axis(int axis);
/**
* \brief Add zero padding in case of concatenation of blobs with different
* spatial sizes.
*
* Details: https://github.com/torch/nn/blob/master/doc/containers.md#depthconcat
*/
public native @Cast("bool") boolean padding(); public native ConcatLayer padding(boolean padding);
public static native @Ptr ConcatLayer create(@Const @ByRef LayerParams params);
}
@Namespace("cv::dnn") @NoOffset public static class SplitLayer extends Layer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public SplitLayer(Pointer p) { super(p); }
/** Number of copies that will be produced (is ignored when negative). */
public native int outputsCount(); public native SplitLayer outputsCount(int outputsCount);
public static native @Ptr SplitLayer create(@Const @ByRef LayerParams params);
}
/**
* Slice layer has several modes:
* 1. Caffe mode
* @param [in] axis Axis of split operation
* @param [in] slice_point Array of split points
*
* The number of output blobs equals the number of split points plus one. The
* first blob is a slice on input from 0 to \p slice_point[0] - 1 by \p axis,
* the second output blob is a slice of input from \p slice_point[0] to
* \p slice_point[1] - 1 by \p axis and the last output blob is a slice of
* input from \p slice_point[-1] up to the end of \p axis size.
*
* 2. TensorFlow mode
* @param begin Vector of start indices
* @param size Vector of sizes
*
* A more convenient numpy-like slice. The one and only output blob
* is a slice {@code input[begin[0]:begin[0]+size[0], begin[1]:begin[1]+size[1], ...]}
*
* 3. Torch mode
* @param axis Axis of split operation
*
* Splits the input blob into equal parts along \p axis.
*/
@Namespace("cv::dnn") @NoOffset public static class SliceLayer extends Layer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public SliceLayer(Pointer p) { super(p); }
/**
* \brief Vector of slice ranges.
*
* The outer vector's size equals the number of output blobs.
* Each inner vector holds slice ranges for the first dimensions of the input.
*/
public native @ByRef RangeVectorVector sliceRanges(); public native SliceLayer sliceRanges(RangeVectorVector sliceRanges);
public native int axis(); public native SliceLayer axis(int axis);
public static native @Ptr SliceLayer create(@Const @ByRef LayerParams params);
}
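// Editor's note: a usage sketch, not emitted by JavaCPP. It expresses a TensorFlow-style
// slice input[0:1, 0:3, ...] through sliceRanges(); the range values are illustrative.
public static SliceLayer exampleSlice() {
SliceLayer slice = SliceLayer.create(new LayerParams());
// one output blob, sliced over the first two input dimensions
slice.sliceRanges(new RangeVectorVector(new Range[] { new Range(0, 1), new Range(0, 3) }));
return slice;
}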
@Namespace("cv::dnn") public static class PermuteLayer extends Layer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public PermuteLayer(Pointer p) { super(p); }
public static native @Ptr PermuteLayer create(@Const @ByRef LayerParams params);
}
/**
* \brief Adds extra values for specific axes.
* @param paddings Vector of paddings in format
*
{@code
* [ pad_before, pad_after, // [0]th dimension
* pad_before, pad_after, // [1]st dimension
* ...
* pad_before, pad_after ] // [n]th dimension
* }
* that represents the number of padded values at every dimension
* starting from the first one. The rest of the dimensions won't
* be padded.
* @param value Value to be padded. Defaults to zero.
* @param type Padding type: 'constant', 'reflect'
* @param input_dims Torch's parameter. If \p input_dims is not equal to the
* actual input dimensionality then the {@code [0]th} dimension
* is considered as a batch dimension and \p paddings are shifted
* by one dimension. Defaults to {@code -1}, which means padding
* corresponds to \p paddings.
*/
@Namespace("cv::dnn") public static class PaddingLayer extends Layer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public PaddingLayer(Pointer p) { super(p); }
public static native @Ptr PaddingLayer create(@Const @ByRef LayerParams params);
}
/* Activations */
@Namespace("cv::dnn") public static class ActivationLayer extends Layer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public ActivationLayer(Pointer p) { super(p); }
public native void forwardSlice(@Const FloatPointer src, FloatPointer dst, int len,
@Cast("size_t") long outPlaneSize, int cn0, int cn1);
public native void forwardSlice(@Const FloatBuffer src, FloatBuffer dst, int len,
@Cast("size_t") long outPlaneSize, int cn0, int cn1);
public native void forwardSlice(@Const float[] src, float[] dst, int len,
@Cast("size_t") long outPlaneSize, int cn0, int cn1);
}
@Namespace("cv::dnn") @NoOffset public static class ReLULayer extends ActivationLayer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public ReLULayer(Pointer p) { super(p); }
public native float negativeSlope(); public native ReLULayer negativeSlope(float negativeSlope);
public static native @Ptr ReLULayer create(@Const @ByRef LayerParams params);
}
@Namespace("cv::dnn") public static class ReLU6Layer extends ActivationLayer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public ReLU6Layer(Pointer p) { super(p); }
public static native @Ptr ReLU6Layer create(@Const @ByRef LayerParams params);
}
@Namespace("cv::dnn") public static class ChannelsPReLULayer extends ActivationLayer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public ChannelsPReLULayer(Pointer p) { super(p); }
public static native @Ptr Layer create(@Const @ByRef LayerParams params);
}
@Namespace("cv::dnn") public static class ELULayer extends ActivationLayer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public ELULayer(Pointer p) { super(p); }
public static native @Ptr ELULayer create(@Const @ByRef LayerParams params);
}
@Namespace("cv::dnn") public static class TanHLayer extends ActivationLayer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public TanHLayer(Pointer p) { super(p); }
public static native @Ptr TanHLayer create(@Const @ByRef LayerParams params);
}
@Namespace("cv::dnn") public static class SigmoidLayer extends ActivationLayer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public SigmoidLayer(Pointer p) { super(p); }
public static native @Ptr SigmoidLayer create(@Const @ByRef LayerParams params);
}
@Namespace("cv::dnn") public static class BNLLLayer extends ActivationLayer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public BNLLLayer(Pointer p) { super(p); }
public static native @Ptr BNLLLayer create(@Const @ByRef LayerParams params);
}
@Namespace("cv::dnn") public static class AbsLayer extends ActivationLayer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public AbsLayer(Pointer p) { super(p); }
public static native @Ptr AbsLayer create(@Const @ByRef LayerParams params);
}
@Namespace("cv::dnn") @NoOffset public static class PowerLayer extends ActivationLayer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public PowerLayer(Pointer p) { super(p); }
public native float power(); public native PowerLayer power(float power);
public native float scale(); public native PowerLayer scale(float scale);
public native float shift(); public native PowerLayer shift(float shift);
public static native @Ptr PowerLayer create(@Const @ByRef LayerParams params);
}
/* Layers used in semantic segmentation */
@Namespace("cv::dnn") @NoOffset public static class CropLayer extends Layer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public CropLayer(Pointer p) { super(p); }
public native int startAxis(); public native CropLayer startAxis(int startAxis);
public native @StdVector IntPointer offset(); public native CropLayer offset(IntPointer offset);
public static native @Ptr CropLayer create(@Const @ByRef LayerParams params);
}
@Namespace("cv::dnn") public static class EltwiseLayer extends Layer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public EltwiseLayer(Pointer p) { super(p); }
public static native @Ptr EltwiseLayer create(@Const @ByRef LayerParams params);
}
@Namespace("cv::dnn") @NoOffset public static class BatchNormLayer extends Layer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public BatchNormLayer(Pointer p) { super(p); }
public native @Cast("bool") boolean hasWeights(); public native BatchNormLayer hasWeights(boolean hasWeights);
public native @Cast("bool") boolean hasBias(); public native BatchNormLayer hasBias(boolean hasBias);
public native float epsilon(); public native BatchNormLayer epsilon(float epsilon);
public native void getScaleShift(@ByRef Mat scale, @ByRef Mat shift);
public static native @Ptr BatchNormLayer create(@Const @ByRef LayerParams params);
}
@Namespace("cv::dnn") @NoOffset public static class MaxUnpoolLayer extends Layer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public MaxUnpoolLayer(Pointer p) { super(p); }
public native @ByRef Size poolKernel(); public native MaxUnpoolLayer poolKernel(Size poolKernel);
public native @ByRef Size poolPad(); public native MaxUnpoolLayer poolPad(Size poolPad);
public native @ByRef Size poolStride(); public native MaxUnpoolLayer poolStride(Size poolStride);
public static native @Ptr MaxUnpoolLayer create(@Const @ByRef LayerParams params);
}
@Namespace("cv::dnn") @NoOffset public static class ScaleLayer extends Layer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public ScaleLayer(Pointer p) { super(p); }
public native @Cast("bool") boolean hasBias(); public native ScaleLayer hasBias(boolean hasBias);
public static native @Ptr ScaleLayer create(@Const @ByRef LayerParams params);
}
@Namespace("cv::dnn") public static class ShiftLayer extends Layer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public ShiftLayer(Pointer p) { super(p); }
public static native @Ptr ShiftLayer create(@Const @ByRef LayerParams params);
}
@Namespace("cv::dnn") public static class PriorBoxLayer extends Layer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public PriorBoxLayer(Pointer p) { super(p); }
public static native @Ptr PriorBoxLayer create(@Const @ByRef LayerParams params);
}
@Namespace("cv::dnn") public static class ReorgLayer extends Layer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public ReorgLayer(Pointer p) { super(p); }
public static native @Ptr ReorgLayer create(@Const @ByRef LayerParams params);
}
@Namespace("cv::dnn") public static class RegionLayer extends Layer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public RegionLayer(Pointer p) { super(p); }
public static native @Ptr RegionLayer create(@Const @ByRef LayerParams params);
}
@Namespace("cv::dnn") public static class DetectionOutputLayer extends Layer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public DetectionOutputLayer(Pointer p) { super(p); }
public static native @Ptr DetectionOutputLayer create(@Const @ByRef LayerParams params);
}
/**
* \brief \f$ L_p \f$ - normalization layer.
* @param p Normalization factor. The most common values are {@code p = 1} for \f$ L_1 \f$
* normalization and {@code p = 2} for \f$ L_2 \f$ normalization, but a custom value is also possible.
* @param eps Parameter \f$ \epsilon \f$ to prevent a division by zero.
* @param across_spatial If true, normalize an input across all non-batch dimensions.
* Otherwise normalize every channel separately.
*
* Across spatial:
* \f[
* norm = \sqrt[p]{\epsilon + \sum_{x, y, c} |src(x, y, c)|^p } \\
* dst(x, y, c) = \frac{ src(x, y, c) }{norm}
* \f]
*
* Channel wise normalization:
* \f[
* norm(c) = \sqrt[p]{\epsilon + \sum_{x, y} |src(x, y, c)|^p } \\
* dst(x, y, c) = \frac{ src(x, y, c) }{norm(c)}
* \f]
*
* Where {@code x, y} are spatial coordinates and {@code c} is the channel.
*
* Every sample in the batch is normalized separately. Optionally,
* output is scaled by the trained parameters.
*/
@Namespace("cv::dnn") @NoOffset public static class NormalizeBBoxLayer extends Layer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public NormalizeBBoxLayer(Pointer p) { super(p); }
public native float pnorm(); public native NormalizeBBoxLayer pnorm(float pnorm);
public native float epsilon(); public native NormalizeBBoxLayer epsilon(float epsilon);
public native @Cast("bool") boolean acrossSpatial(); public native NormalizeBBoxLayer acrossSpatial(boolean acrossSpatial);
public static native @Ptr NormalizeBBoxLayer create(@Const @ByRef LayerParams params);
}
/**
* \brief Resize input 4-dimensional blob by the nearest neighbor strategy.
*
* This layer is used to support TensorFlow's resize_nearest_neighbor op.
*/
@Namespace("cv::dnn") public static class ResizeNearestNeighborLayer extends Layer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public ResizeNearestNeighborLayer(Pointer p) { super(p); }
public static native @Ptr ResizeNearestNeighborLayer create(@Const @ByRef LayerParams params);
}
@Namespace("cv::dnn") public static class ProposalLayer extends Layer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public ProposalLayer(Pointer p) { super(p); }
public static native @Ptr ProposalLayer create(@Const @ByRef LayerParams params);
}
/** \}
* \} */
// #endif
// Parsed from <opencv2/dnn/dnn.hpp>
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
// #ifndef OPENCV_DNN_DNN_HPP
// #define OPENCV_DNN_DNN_HPP
// #include
// #include
// #if !defined CV_DOXYGEN && !defined CV_DNN_DONT_ADD_EXPERIMENTAL_NS
// #define CV__DNN_EXPERIMENTAL_NS_BEGIN namespace experimental_dnn_v3 {
// #define CV__DNN_EXPERIMENTAL_NS_END }
// #else
// #define CV__DNN_EXPERIMENTAL_NS_BEGIN
// #define CV__DNN_EXPERIMENTAL_NS_END
// #endif
// #include
/** \addtogroup dnn
* \{ */
/**
* \brief Enum of computation backends supported by layers.
*/
/** enum cv::dnn::Backend */
public static final int
DNN_BACKEND_DEFAULT = 0,
DNN_BACKEND_HALIDE = 1;
/**
* \brief Enum of target devices for computations.
*/
/** enum cv::dnn::Target */
public static final int
DNN_TARGET_CPU = 0,
DNN_TARGET_OPENCL = 1;
/** \brief This class provides all data needed to initialize a layer.
*
* It includes a dictionary with scalar params (which can be read using the Dict interface),
* blob params #blobs and optional meta information: #name and #type of the layer instance.
*/
@Namespace("cv::dnn") @NoOffset public static class LayerParams extends Dict {
static { Loader.load(); }
/** Default native constructor. */
public LayerParams() { super((Pointer)null); allocate(); }
/** Native array allocator. Access with {@link Pointer#position(long)}. */
public LayerParams(long size) { super((Pointer)null); allocateArray(size); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public LayerParams(Pointer p) { super(p); }
private native void allocate();
private native void allocateArray(long size);
@Override public LayerParams position(long position) {
return (LayerParams)super.position(position);
}
//TODO: Add ability to name blob params
/** List of learned parameters stored as blobs. */
public native @ByRef MatVector blobs(); public native LayerParams blobs(MatVector blobs);
/** Name of the layer instance (optional, can be used for internal purposes). */
public native @Str BytePointer name(); public native LayerParams name(BytePointer name);
/** Type name which was used for creating layer by layer factory (optional). */
public native @Str BytePointer type(); public native LayerParams type(BytePointer type);
}
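// Editor's note: a usage sketch, not emitted by JavaCPP; the name and type strings are
// illustrative. Scalar entries are normally filled in by the model importers.
public static LayerParams exampleLayerParams(Mat weights) {
LayerParams lp = new LayerParams();
lp.name(new BytePointer("my_conv")); // hypothetical instance name
lp.type(new BytePointer("Convolution")); // factory type name
lp.blobs().push_back(weights); // learned parameters travel as blobs
return lp;
}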
/**
* \brief Derivatives of this class encapsulate functions of certain backends.
*/
@Namespace("cv::dnn") @NoOffset public static class BackendNode extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public BackendNode(Pointer p) { super(p); }
public BackendNode(int backendId) { super((Pointer)null); allocate(backendId); }
private native void allocate(int backendId);
/** Backend identifier. */
public native int backendId(); public native BackendNode backendId(int backendId);
}
/**
* \brief Derivatives of this class wrap cv::Mat for different backends and targets.
*/
@Namespace("cv::dnn") @NoOffset public static class BackendWrapper extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public BackendWrapper(Pointer p) { super(p); }
/**
* \brief Wrap cv::Mat for specific backend and target.
* @param [in] targetId Target identifier.
* @param [in] m cv::Mat for wrapping.
*
* Makes a CPU->GPU data transfer if it's required for the target.
*/
/**
* \brief Make wrapper for reused cv::Mat.
* @param [in] base Wrapper of cv::Mat that will be reused.
* @param [in] shape Specific shape.
*
* Initialize the wrapper from another one. It'll wrap the same host CPU
* memory and mustn't allocate memory on the device (i.e. GPU). It might
* have a different shape. Use it when CPU memory is reused, so that the
* associated memory on the device is reused too.
*/
/**
* \brief Transfer data to CPU host memory.
*/
public native void copyToHost();
/**
* \brief Indicate that the actual data is on the CPU.
*/
public native void setHostDirty();
/** Backend identifier. */
public native int backendId(); public native BackendWrapper backendId(int backendId);
/** Target identifier. */
public native int targetId(); public native BackendWrapper targetId(int targetId);
}
/** \brief This interface class allows building new Layers, which are the building blocks of networks.
*
* Each class derived from Layer must implement the allocate() methods to declare its own outputs and forward() to compute the outputs.
* Also, before using the new layer in networks you must register it using one of the \ref dnnLayerFactory "LayerFactory" macros.
*/
@Namespace("cv::dnn") @NoOffset public static class Layer extends Algorithm {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public Layer(Pointer p) { super(p); }
/** List of learned parameters must be stored here to allow reading them via Net::getParam(). */
public native @ByRef MatVector blobs(); public native Layer blobs(MatVector blobs);
/** \brief Computes and sets internal parameters according to inputs, outputs and blobs.
* @param [in] input vector of already allocated input blobs
* @param [out] output vector of already allocated output blobs
*
* This method is called after the network has allocated all memory for input and output blobs
* and before inferencing.
*/
public native void finalize(@Const @ByRef MatPointerVector input, @ByRef MatVector output);
/** \brief Given the \p input blobs, computes the output \p blobs.
* @param [in] input the input blobs.
* @param [out] output allocated output blobs, which will store results of the computation.
* @param [out] internals allocated internal blobs
*/
public native void forward(@ByRef MatPointerVector input, @ByRef MatVector output, @ByRef MatVector internals);
/** \brief Given the \p input blobs, computes the output \p blobs.
* @param [in] inputs the input blobs.
* @param [out] outputs allocated output blobs, which will store results of the computation.
* @param [out] internals allocated internal blobs
*/
public native void forward(@ByVal MatVector inputs, @ByVal MatVector outputs, @ByVal MatVector internals);
public native void forward(@ByVal UMatVector inputs, @ByVal UMatVector outputs, @ByVal UMatVector internals);
public native void forward(@ByVal GpuMatVector inputs, @ByVal GpuMatVector outputs, @ByVal GpuMatVector internals);
/** \brief Given the \p input blobs, computes the output \p blobs.
* @param [in] inputs the input blobs.
* @param [out] outputs allocated output blobs, which will store results of the computation.
* @param [out] internals allocated internal blobs
*/
public native void forward_fallback(@ByVal MatVector inputs, @ByVal MatVector outputs, @ByVal MatVector internals);
public native void forward_fallback(@ByVal UMatVector inputs, @ByVal UMatVector outputs, @ByVal UMatVector internals);
public native void forward_fallback(@ByVal GpuMatVector inputs, @ByVal GpuMatVector outputs, @ByVal GpuMatVector internals);
/** \brief \overload */
public native void finalize(@Const @ByRef MatVector inputs, @ByRef MatVector outputs);
/** \brief \overload */
public native @ByVal MatVector finalize(@Const @ByRef MatVector inputs);
/** \brief Allocates layer and computes output. */
public native void run(@Const @ByRef MatVector inputs, @ByRef MatVector outputs,
@ByRef MatVector internals);
/** \brief Returns the index of an input blob in the input array.
* @param inputName label of input blob
*
* Each layer input and output can be labeled to easily identify them using "%<layer_name>[.output_name]" notation.
* This method maps the label of an input blob to its index in the input vector.
*/
public native int inputNameToIndex(@Str BytePointer inputName);
public native int inputNameToIndex(@Str String inputName);
/** \brief Returns index of output blob in output array.
* @see inputNameToIndex()
*/
public native int outputNameToIndex(@Str BytePointer outputName);
public native int outputNameToIndex(@Str String outputName);
/**
* \brief Ask the layer if it supports a specific backend for doing computations.
* @param [in] backendId computation backend identifier.
* @see Backend
*/
public native @Cast("bool") boolean supportBackend(int backendId);
/**
* \brief Returns Halide backend node.
* @param [in] inputs Input Halide buffers.
* @see BackendNode, BackendWrapper
*
* Input buffers should be exactly the same ones that will be used in forward invocations.
* Although we could use a Halide::ImageParam based on the input shape only,
* this helps prevent some memory management issues (if something is wrong,
* Halide tests will fail).
*/
/**
* \brief Automatic Halide scheduling based on layer hyper-parameters.
* @param [in] node Backend node with Halide functions.
* @param [in] inputs Blobs that will be used in forward invocations.
* @param [in] outputs Blobs that will be used in forward invocations.
* @param [in] targetId Target identifier
* @see BackendNode, Target
*
* Layers don't use their own Halide::Func members because layer fusing may
* have been applied. In that case the fused function should be scheduled.
*/
public native void applyHalideScheduler(@Ptr BackendNode node,
@Const @ByRef MatPointerVector inputs,
@Const @ByRef MatVector outputs,
int targetId);
/**
* \brief Implement layers fusing.
* @param [in] node Backend node of bottom layer.
* @see BackendNode
*
* Relevant for graph-based backends. If the layer is attached successfully,
* returns a non-empty cv::Ptr to a node of the same backend.
* Fuses only over the last function.
*/
public native @Ptr BackendNode tryAttach(@Ptr BackendNode node);
/**
* \brief Tries to attach to the layer the subsequent activation layer, i.e. do the layer fusion in a partial case.
* @param [in] layer The subsequent activation layer.
*
* Returns true if the activation layer has been attached successfully.
*/
public native @Cast("bool") boolean setActivation(@Ptr ActivationLayer layer);
/**
 * \brief Tries to attach the subsequent batch normalization layer to this layer, i.e. performs a partial layer fusion.
* @param [in] layer The subsequent batch normalization layer.
*
* Returns true if the batch normalization layer has been attached successfully.
*/
public native @Cast("bool") boolean setBatchNorm(@Ptr BatchNormLayer layer);
/**
 * \brief Tries to attach the subsequent scaling layer to this layer, i.e. performs a partial layer fusion.
* @param [in] layer The subsequent scaling layer.
*
* Returns true if the scaling layer has been attached successfully.
*/
public native @Cast("bool") boolean setScale(@Ptr ScaleLayer layer);
/**
 * \brief Detaches all layers attached to this particular layer.
 */
public native void unsetAttached();
public native @Cast("bool") boolean getMemoryShapes(@Const @ByRef MatShapeVector inputs,
int requiredOutputs,
@ByRef MatShapeVector outputs,
@ByRef MatShapeVector internals);
public native @Cast("int64") long getFLOPS(@Const @ByRef MatShapeVector inputs,
@Const @ByRef MatShapeVector outputs);
/** Name of the layer instance; can be used for logging or other internal purposes. */
public native @Str BytePointer name(); public native Layer name(BytePointer name);
/** Type name which was used to create the layer via the layer factory. */
public native @Str BytePointer type(); public native Layer type(BytePointer type);
/** Preferred target for layer forwarding. */
public native int preferableTarget(); public native Layer preferableTarget(int preferableTarget);
/** Initializes only #name, #type and #blobs fields. */
public native void setParamsFrom(@Const @ByRef LayerParams params);
}
/** \brief This class allows creating and manipulating comprehensive artificial neural networks.
 *
 *  A neural network is represented as a directed acyclic graph (DAG), where vertices are Layer instances
 *  and edges specify the relationships between layer inputs and outputs.
 *
 *  Each network layer has a unique integer id and a unique string name inside its network.
 *  LayerId can store either the layer name or the layer id.
 *
 *  This class supports reference counting of its instances, i.e. copies point to the same instance.
 */
@Namespace("cv::dnn") @NoOffset public static class Net extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public Net(Pointer p) { super(p); }
/** Native array allocator. Access with {@link Pointer#position(long)}. */
public Net(long size) { super((Pointer)null); allocateArray(size); }
private native void allocateArray(long size);
@Override public Net position(long position) {
return (Net)super.position(position);
}
/** Default constructor. */
public Net() { super((Pointer)null); allocate(); }
private native void allocate();
/** Returns true if there are no layers in the network. */
public native @Cast("bool") boolean empty();
/** \brief Adds a new layer to the net.
 *  @param name unique name of the layer being added.
 *  @param type typename of the layer being added (the type must be registered in LayerRegister).
 *  @param params parameters which will be used to initialize the created layer.
 *  @return unique identifier of the created layer, or -1 on failure.
 */
public native int addLayer(@Str BytePointer name, @Str BytePointer type, @ByRef LayerParams params);
public native int addLayer(@Str String name, @Str String type, @ByRef LayerParams params);
/** \brief Adds a new layer and connects its first input to the first output of the previously added layer.
* @see addLayer()
*/
public native int addLayerToPrev(@Str BytePointer name, @Str BytePointer type, @ByRef LayerParams params);
public native int addLayerToPrev(@Str String name, @Str String type, @ByRef LayerParams params);
/** \brief Converts the string name of a layer to its integer identifier.
 *  @return id of the layer, or -1 if the layer wasn't found.
 */
public native int getLayerId(@Str BytePointer layer);
public native int getLayerId(@Str String layer);
public native @ByVal StringVector getLayerNames();
/** \brief Container for strings and integers. */
/** \brief Returns pointer to layer with specified id or name which the network use. */
public native @Ptr Layer getLayer(@ByVal @Cast("cv::dnn::Net::LayerId*") DictValue layerId);
/** \brief Returns pointers to the input layers of a specific layer. */ // FIXIT: CV_WRAP
/** \brief Deletes a layer from the network (not implemented yet). */
public native void deleteLayer(@ByVal @Cast("cv::dnn::Net::LayerId*") DictValue layer);
/** \brief Connects an output of the first layer to an input of the second layer.
 *  @param outPin descriptor of the first layer output.
 *  @param inpPin descriptor of the second layer input.
 *
 *  Descriptors follow the template <layer_name>[.input_number] :
 *  - the first part of the template, layer_name, is the string name of the added layer.
 *    If this part is empty then the network input pseudo layer will be used;
 *  - the second, optional part of the template, input_number,
 *    is either the number of the layer input or its label.
 *    If this part is omitted then the first layer input will be used.
 *
 *  @see setNetInputs(), Layer::inputNameToIndex(), Layer::outputNameToIndex()
 */
public native void connect(@Str BytePointer outPin, @Str BytePointer inpPin);
public native void connect(@Str String outPin, @Str String inpPin);
/** \brief Connects the #\p outNum output of the first layer to the #\p inpNum input of the second layer.
 *  @param outLayerId identifier of the first layer
 *  @param inpLayerId identifier of the second layer
 *  @param outNum number of the first layer output
 *  @param inpNum number of the second layer input
 */
public native void connect(int outLayerId, int outNum, int inpLayerId, int inpNum);
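// Usage sketch (illustrative, not part of the generated bindings): assembling a tiny
// graph by hand. The layer names and types are hypothetical, and LayerParams is assumed
// to be default-constructible, as is usual for JavaCPP wrappers:
//
//   Net net = new Net();
//   LayerParams params = new LayerParams();
//   int conv = net.addLayer("conv1", "Convolution", params);
//   int relu = net.addLayer("relu1", "ReLU", params);
//   net.connect(conv, 0, relu, 0);   // by ids and pin numbers
//   net.connect("conv1", "relu1");   // or by <layer_name>[.input_number] descriptors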
/** \brief Sets the output names of the network input pseudo layer.
 *
 *  Each net always has its own special network input pseudo layer with id=0.
 *  This layer stores the user blobs only and does not make any computations.
 *  In fact, this layer provides the only way to pass user data into the network.
 *  As with any other layer, this layer can label its outputs, and this function provides an easy way to do so.
 */
public native void setInputsNames(@Const @ByRef StringVector inputBlobNames);
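// Sketch of naming the single network input, assuming the usual JavaCPP
// StringVector(String...) constructor from opencv_core; "data" is an illustrative name:
//
//   net.setInputsNames(new StringVector("data"));
//   net.setInput(blob, "data");   // later, feed a blob to that named input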
/** \brief Runs a forward pass to compute the output of the layer with name \p outputName.
 *  @param outputName name of the layer whose output is needed
 *  @return blob for the first output of the specified layer.
 *  \details By default runs a forward pass for the whole network.
 */
public native @ByVal Mat forward(@Str BytePointer outputName/*=cv::String()*/);
public native @ByVal Mat forward();
public native @ByVal Mat forward(@Str String outputName/*=cv::String()*/);
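// A minimal end-to-end sketch; the file and layer names are hypothetical and static
// imports of org.bytedeco.javacpp.opencv_dnn.* are assumed:
//
//   Net net = readNetFromCaffe("deploy.prototxt", "model.caffemodel");
//   net.setInput(blobFromImage(image));   // image is a Mat, e.g. from opencv_imgcodecs.imread()
//   Mat prob = net.forward("prob");       // or net.forward() for the default (last) output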
/** \brief Runs a forward pass to compute the output of the layer with name \p outputName.
 *  @param outputBlobs contains all output blobs for the specified layer.
 *  @param outputName name of the layer whose output is needed
 *  \details If \p outputName is empty, runs a forward pass for the whole network.
 */
public native void forward(@ByVal MatVector outputBlobs, @Str BytePointer outputName/*=cv::String()*/);
public native void forward(@ByVal MatVector outputBlobs);
public native void forward(@ByVal UMatVector outputBlobs, @Str String outputName/*=cv::String()*/);
public native void forward(@ByVal UMatVector outputBlobs);
public native void forward(@ByVal GpuMatVector outputBlobs, @Str BytePointer outputName/*=cv::String()*/);
public native void forward(@ByVal GpuMatVector outputBlobs);
public native void forward(@ByVal MatVector outputBlobs, @Str String outputName/*=cv::String()*/);
public native void forward(@ByVal UMatVector outputBlobs, @Str BytePointer outputName/*=cv::String()*/);
public native void forward(@ByVal GpuMatVector outputBlobs, @Str String outputName/*=cv::String()*/);
/** \brief Runs a forward pass to compute the outputs of the layers listed in \p outBlobNames.
 *  @param outputBlobs contains blobs for the first outputs of the specified layers.
 *  @param outBlobNames names of the layers whose outputs are needed
 */
public native void forward(@ByVal MatVector outputBlobs,
@Const @ByRef StringVector outBlobNames);
public native void forward(@ByVal UMatVector outputBlobs,
@Const @ByRef StringVector outBlobNames);
public native void forward(@ByVal GpuMatVector outputBlobs,
@Const @ByRef StringVector outBlobNames);
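// Sketch of fetching several outputs in one pass, e.g. for multi-output detection
// models; the output layer names below are illustrative:
//
//   StringVector outNames = new StringVector("detection_out1", "detection_out2");
//   MatVector outs = new MatVector();
//   net.forward(outs, outNames);
//   Mat firstOut = outs.get(0);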
/** \brief Runs a forward pass to compute the outputs of the layers listed in \p outBlobNames.
 *  @param outputBlobs contains all output blobs for each layer specified in \p outBlobNames.
 *  @param outBlobNames names of the layers whose outputs are needed
 */
public native @Name("forward") void forwardAndRetrieve(@StdVector MatVector outputBlobs,
@Const @ByRef StringVector outBlobNames);
/**
 * \brief Compiles Halide layers.
 * @param [in] scheduler Path to a YAML file with scheduling directives.
 * @see setPreferableBackend
 *
 * Schedules layers that support the Halide backend, then compiles them for a
 * specific target. For layers that are not represented in the scheduling file,
 * or if no manual scheduling is used at all, automatic scheduling is applied.
 */
public native void setHalideScheduler(@Str BytePointer scheduler);
public native void setHalideScheduler(@Str String scheduler);
/**
 * \brief Asks the network to use a specific computation backend where supported.
 * @param [in] backendId backend identifier.
 * @see Backend
 */
public native void setPreferableBackend(int backendId);
/**
 * \brief Asks the network to make computations on a specific target device.
 * @param [in] targetId target identifier.
 * @see Target
 */
public native void setPreferableTarget(int targetId);
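// Typical configuration sketch using the Backend/Target constants defined earlier in
// this file; whether OpenCL is actually used depends on the build and hardware:
//
//   net.setPreferableBackend(DNN_BACKEND_DEFAULT);
//   net.setPreferableTarget(DNN_TARGET_OPENCL);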
/** \brief Sets the new value for the layer output blob.
 *  @param name descriptor of the layer output blob being updated.
 *  @param blob new blob.
 *  @see connect(String, String) for the format of the descriptor.
 *  \note If the blob being updated is not empty then \p blob must have the same shape,
 *  because network reshaping is not implemented yet.
 */
public native void setInput(@ByVal Mat blob, @Str BytePointer name/*=""*/);
public native void setInput(@ByVal Mat blob);
public native void setInput(@ByVal Mat blob, @Str String name/*=""*/);
public native void setInput(@ByVal UMat blob, @Str String name/*=""*/);
public native void setInput(@ByVal UMat blob);
public native void setInput(@ByVal UMat blob, @Str BytePointer name/*=""*/);
public native void setInput(@ByVal GpuMat blob, @Str BytePointer name/*=""*/);
public native void setInput(@ByVal GpuMat blob);
public native void setInput(@ByVal GpuMat blob, @Str String name/*=""*/);
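// Sketch of feeding data: blobs are normally produced by blobFromImage()/blobFromImages()
// (declared below); an explicit name is only needed for multi-input networks:
//
//   Mat blob = blobFromImage(image);   // 4-dimensional, NCHW
//   net.setInput(blob);                // or net.setInput(blob, "data") for a named input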
/** \brief Sets the new value for the learned parameter of the layer.
 *  @param layer name or id of the layer.
 *  @param numParam index of the layer parameter in the Layer::blobs array.
 *  @param blob the new value.
 *  @see Layer::blobs
 *  \note If the shape of the new blob differs from the previous shape,
 *  then the following forward pass may fail.
 */
public native void setParam(@ByVal @Cast("cv::dnn::Net::LayerId*") DictValue layer, int numParam, @Const @ByRef Mat blob);
/** \brief Returns parameter blob of the layer.
* @param layer name or id of the layer.
* @param numParam index of the layer parameter in the Layer::blobs array.
* @see Layer::blobs
*/
public native @ByVal Mat getParam(@ByVal @Cast("cv::dnn::Net::LayerId*") DictValue layer, int numParam/*=0*/);
public native @ByVal Mat getParam(@ByVal @Cast("cv::dnn::Net::LayerId*") DictValue layer);
/** \brief Returns indexes of layers with unconnected outputs.
*/
public native @StdVector IntPointer getUnconnectedOutLayers();
/** \brief Returns the input and output shapes for all layers in the loaded model;
 *  preliminary inference isn't necessary.
 *  @param netInputShapes shapes for all input blobs in the net input layer.
 *  @param layersIds output parameter for layer IDs.
 *  @param inLayersShapes output parameter for input layers shapes;
 *  order is the same as in layersIds
 *  @param outLayersShapes output parameter for output layers shapes;
 *  order is the same as in layersIds
 */
public native void getLayersShapes(@Const @ByRef MatShapeVector netInputShapes,
@StdVector IntPointer layersIds,
@ByRef MatShapeVectorVector inLayersShapes,
@ByRef MatShapeVectorVector outLayersShapes);
public native void getLayersShapes(@Const @ByRef MatShapeVector netInputShapes,
@StdVector IntBuffer layersIds,
@ByRef MatShapeVectorVector inLayersShapes,
@ByRef MatShapeVectorVector outLayersShapes);
public native void getLayersShapes(@Const @ByRef MatShapeVector netInputShapes,
@StdVector int[] layersIds,
@ByRef MatShapeVectorVector inLayersShapes,
@ByRef MatShapeVectorVector outLayersShapes);
/** \overload */
public native void getLayersShapes(@Const @StdVector @ByRef IntPointer netInputShape,
@StdVector IntPointer layersIds,
@ByRef MatShapeVectorVector inLayersShapes,
@ByRef MatShapeVectorVector outLayersShapes);
public native void getLayersShapes(@Const @StdVector @ByRef IntPointer netInputShape,
@StdVector IntBuffer layersIds,
@ByRef MatShapeVectorVector inLayersShapes,
@ByRef MatShapeVectorVector outLayersShapes);
public native void getLayersShapes(@Const @StdVector @ByRef IntPointer netInputShape,
@StdVector int[] layersIds,
@ByRef MatShapeVectorVector inLayersShapes,
@ByRef MatShapeVectorVector outLayersShapes);
/** \brief Returns the input and output shapes for the layer with the specified
 *  id in the loaded model; preliminary inference isn't necessary.
 *  @param netInputShape shape of the input blob in the net input layer.
 *  @param layerId id of the layer.
 *  @param inLayerShapes output parameter for input layers shapes;
 *  order is the same as in layersIds
 *  @param outLayerShapes output parameter for output layers shapes;
 *  order is the same as in layersIds
 */
public native void getLayerShapes(@Const @StdVector @ByRef IntPointer netInputShape,
int layerId,
@ByRef MatShapeVector inLayerShapes,
@ByRef MatShapeVector outLayerShapes); // FIXIT: CV_WRAP
/** \overload */
public native void getLayerShapes(@Const @ByRef MatShapeVector netInputShapes,
int layerId,
@ByRef MatShapeVector inLayerShapes,
@ByRef MatShapeVector outLayerShapes); // FIXIT: CV_WRAP
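// Shape-query sketch: shapes are plain int vectors (see the shape() helpers near the
// end of this file); the 1x3x224x224 input shape and layer id 0 are illustrative:
//
//   MatShapeVector inShapes = new MatShapeVector(), outShapes = new MatShapeVector();
//   net.getLayerShapes(shape(1, 3, 224, 224), 0, inShapes, outShapes);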
/** \brief Computes FLOPs for the whole loaded model with the specified input shapes.
 *  @param netInputShapes vector of shapes for all net inputs.
 *  @return computed FLOPs.
 */
public native @Cast("int64") long getFLOPS(@Const @ByRef MatShapeVector netInputShapes);
/** \overload */
public native @Cast("int64") long getFLOPS(@Const @StdVector @ByRef IntPointer netInputShape);
/** \overload */
public native @Cast("int64") long getFLOPS(int layerId,
@Const @ByRef MatShapeVector netInputShapes);
/** \overload */
public native @Cast("int64") long getFLOPS(int layerId,
@Const @StdVector @ByRef IntPointer netInputShape);
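// FLOPs-estimate sketch for a hypothetical 1x3x224x224 input:
//
//   long flops = net.getFLOPS(shape(1, 3, 224, 224));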
/** \brief Returns the list of layer types used in the model.
 *  @param layersTypes output parameter for the returned types.
 */
public native void getLayerTypes(@ByRef StringVector layersTypes);
/** \brief Returns the count of layers of the specified type.
 *  @param layerType type.
 *  @return count of layers
 */
public native int getLayersCount(@Str BytePointer layerType);
public native int getLayersCount(@Str String layerType);
/** \brief Computes the number of bytes required to store
 *  all weights and intermediate blobs for the model.
 *  @param netInputShapes vector of shapes for all net inputs.
 *  @param weights output parameter to store the resulting bytes for weights.
 *  @param blobs output parameter to store the resulting bytes for intermediate blobs.
 */
public native void getMemoryConsumption(@Const @ByRef MatShapeVector netInputShapes,
@Cast("size_t*") @ByRef SizeTPointer weights, @Cast("size_t*") @ByRef SizeTPointer blobs); // FIXIT: CV_WRAP
/** \overload */
public native void getMemoryConsumption(@Const @StdVector @ByRef IntPointer netInputShape,
@Cast("size_t*") @ByRef SizeTPointer weights, @Cast("size_t*") @ByRef SizeTPointer blobs);
/** \overload */
public native void getMemoryConsumption(int layerId,
@Const @ByRef MatShapeVector netInputShapes,
@Cast("size_t*") @ByRef SizeTPointer weights, @Cast("size_t*") @ByRef SizeTPointer blobs);
/** \overload */
public native void getMemoryConsumption(int layerId,
@Const @StdVector @ByRef IntPointer netInputShape,
@Cast("size_t*") @ByRef SizeTPointer weights, @Cast("size_t*") @ByRef SizeTPointer blobs);
/** \brief Computes the number of bytes required to store
 *  all weights and intermediate blobs for each layer.
 *  @param netInputShapes vector of shapes for all net inputs.
 *  @param layerIds output vector to save layer IDs.
 *  @param weights output parameter to store the resulting bytes for weights.
 *  @param blobs output parameter to store the resulting bytes for intermediate blobs.
 */
public native void getMemoryConsumption(@Const @ByRef MatShapeVector netInputShapes,
@StdVector IntPointer layerIds,
@Cast("size_t*") @StdVector SizeTPointer weights,
@Cast("size_t*") @StdVector SizeTPointer blobs);
public native void getMemoryConsumption(@Const @ByRef MatShapeVector netInputShapes,
@StdVector IntBuffer layerIds,
@Cast("size_t*") @StdVector SizeTPointer weights,
@Cast("size_t*") @StdVector SizeTPointer blobs);
public native void getMemoryConsumption(@Const @ByRef MatShapeVector netInputShapes,
@StdVector int[] layerIds,
@Cast("size_t*") @StdVector SizeTPointer weights,
@Cast("size_t*") @StdVector SizeTPointer blobs); // FIXIT: CV_WRAP
/** \overload */
public native void getMemoryConsumption(@Const @StdVector @ByRef IntPointer netInputShape,
@StdVector IntPointer layerIds,
@Cast("size_t*") @StdVector SizeTPointer weights,
@Cast("size_t*") @StdVector SizeTPointer blobs);
public native void getMemoryConsumption(@Const @StdVector @ByRef IntPointer netInputShape,
@StdVector IntBuffer layerIds,
@Cast("size_t*") @StdVector SizeTPointer weights,
@Cast("size_t*") @StdVector SizeTPointer blobs);
public native void getMemoryConsumption(@Const @StdVector @ByRef IntPointer netInputShape,
@StdVector int[] layerIds,
@Cast("size_t*") @StdVector SizeTPointer weights,
@Cast("size_t*") @StdVector SizeTPointer blobs); // FIXIT: CV_WRAP
/** \brief Enables or disables layer fusion in the network.
* @param fusion true to enable the fusion, false to disable. The fusion is enabled by default.
*/
public native void enableFusion(@Cast("bool") boolean fusion);
/** \brief Returns the overall time for inference and timings (in ticks) for layers.
 *  Indexes in the returned vector correspond to layer ids. Some layers can be fused with others;
 *  in this case a zero tick count is returned for the skipped layers.
 *  @param timings vector of tick timings for all layers.
 *  @return overall ticks for model inference.
 */
public native @Cast("int64") long getPerfProfile(@StdVector DoublePointer timings);
public native @Cast("int64") long getPerfProfile(@StdVector DoubleBuffer timings);
public native @Cast("int64") long getPerfProfile(@StdVector double[] timings);
}
/** \brief Reads a network model stored in Darknet model files.
 *  @param cfgFile path to the .cfg file with the text description of the network architecture.
 *  @param darknetModel path to the .weights file with the learned network.
 *  @return Net object that is ready to do forward; throws an exception on failure.
 */
@Namespace("cv::dnn") public static native @ByVal Net readNetFromDarknet(@Str BytePointer cfgFile, @Str BytePointer darknetModel/*=cv::String()*/);
@Namespace("cv::dnn") public static native @ByVal Net readNetFromDarknet(@Str BytePointer cfgFile);
@Namespace("cv::dnn") public static native @ByVal Net readNetFromDarknet(@Str String cfgFile, @Str String darknetModel/*=cv::String()*/);
@Namespace("cv::dnn") public static native @ByVal Net readNetFromDarknet(@Str String cfgFile);
/** \brief Reads a network model stored in Caffe framework's format.
 *  @param prototxt path to the .prototxt file with the text description of the network architecture.
 *  @param caffeModel path to the .caffemodel file with the learned network.
* @return Net object.
*/
@Namespace("cv::dnn") public static native @ByVal Net readNetFromCaffe(@Str BytePointer prototxt, @Str BytePointer caffeModel/*=cv::String()*/);
@Namespace("cv::dnn") public static native @ByVal Net readNetFromCaffe(@Str BytePointer prototxt);
@Namespace("cv::dnn") public static native @ByVal Net readNetFromCaffe(@Str String prototxt, @Str String caffeModel/*=cv::String()*/);
@Namespace("cv::dnn") public static native @ByVal Net readNetFromCaffe(@Str String prototxt);
/** \brief Reads a network model from Caffe model data stored in memory.
* \details This is an overloaded member function, provided for convenience.
* It differs from the above function only in what argument(s) it accepts.
* @param bufferProto buffer containing the content of the .prototxt file
* @param lenProto length of bufferProto
* @param bufferModel buffer containing the content of the .caffemodel file
* @param lenModel length of bufferModel
* @return Net object.
*/
@Namespace("cv::dnn") public static native @ByVal Net readNetFromCaffe(@Cast("const char*") BytePointer bufferProto, @Cast("size_t") long lenProto,
@Cast("const char*") BytePointer bufferModel/*=NULL*/, @Cast("size_t") long lenModel/*=0*/);
@Namespace("cv::dnn") public static native @ByVal Net readNetFromCaffe(@Cast("const char*") BytePointer bufferProto, @Cast("size_t") long lenProto);
@Namespace("cv::dnn") public static native @ByVal Net readNetFromCaffe(String bufferProto, @Cast("size_t") long lenProto,
String bufferModel/*=NULL*/, @Cast("size_t") long lenModel/*=0*/);
@Namespace("cv::dnn") public static native @ByVal Net readNetFromCaffe(String bufferProto, @Cast("size_t") long lenProto);
/** \brief Reads a network model stored in TensorFlow framework's format.
 *  @param model path to the .pb file with the binary protobuf description of the network architecture
 *  @param config path to the .pbtxt file that contains a text graph definition in protobuf format.
 *  The resulting Net object is built from the text graph using weights from the binary one,
 *  which makes it more flexible.
 *  @return Net object.
 */
@Namespace("cv::dnn") public static native @ByVal Net readNetFromTensorflow(@Str BytePointer model, @Str BytePointer config/*=cv::String()*/);
@Namespace("cv::dnn") public static native @ByVal Net readNetFromTensorflow(@Str BytePointer model);
@Namespace("cv::dnn") public static native @ByVal Net readNetFromTensorflow(@Str String model, @Str String config/*=cv::String()*/);
@Namespace("cv::dnn") public static native @ByVal Net readNetFromTensorflow(@Str String model);
/** \brief Reads a network model stored in TensorFlow framework's format.
* \details This is an overloaded member function, provided for convenience.
* It differs from the above function only in what argument(s) it accepts.
* @param bufferModel buffer containing the content of the pb file
* @param lenModel length of bufferModel
* @param bufferConfig buffer containing the content of the pbtxt file
* @param lenConfig length of bufferConfig
*/
@Namespace("cv::dnn") public static native @ByVal Net readNetFromTensorflow(@Cast("const char*") BytePointer bufferModel, @Cast("size_t") long lenModel,
@Cast("const char*") BytePointer bufferConfig/*=NULL*/, @Cast("size_t") long lenConfig/*=0*/);
@Namespace("cv::dnn") public static native @ByVal Net readNetFromTensorflow(@Cast("const char*") BytePointer bufferModel, @Cast("size_t") long lenModel);
@Namespace("cv::dnn") public static native @ByVal Net readNetFromTensorflow(String bufferModel, @Cast("size_t") long lenModel,
String bufferConfig/*=NULL*/, @Cast("size_t") long lenConfig/*=0*/);
@Namespace("cv::dnn") public static native @ByVal Net readNetFromTensorflow(String bufferModel, @Cast("size_t") long lenModel);
/**
 * \brief Reads a network model stored in Torch7 framework's format.
 * @param model path to the file, dumped from Torch by using the torch.save() function.
 * @param isBinary specifies whether the network was serialized in ASCII or binary mode.
 * @return Net object.
 *
 * \note ASCII mode of the Torch serializer is preferable, because binary mode extensively uses the C {@code long} type,
 * which has varying bit length on different systems.
 *
 * The file being loaded must contain a serialized nn.Module object
 * with the network being imported. Try to eliminate custom objects from the serialized data to avoid import errors.
 *
 * List of supported layers (i.e. object instances derived from the Torch nn.Module class):
 * - nn.Sequential
 * - nn.Parallel
 * - nn.Concat
 * - nn.Linear
 * - nn.SpatialConvolution
 * - nn.SpatialMaxPooling, nn.SpatialAveragePooling
 * - nn.ReLU, nn.TanH, nn.Sigmoid
 * - nn.Reshape
 * - nn.SoftMax, nn.LogSoftMax
 *
 * Also some equivalents of these classes from cunn, cudnn, and fbcunn may be successfully imported.
 */
@Namespace("cv::dnn") public static native @ByVal Net readNetFromTorch(@Str BytePointer model, @Cast("bool") boolean isBinary/*=true*/);
@Namespace("cv::dnn") public static native @ByVal Net readNetFromTorch(@Str BytePointer model);
@Namespace("cv::dnn") public static native @ByVal Net readNetFromTorch(@Str String model, @Cast("bool") boolean isBinary/*=true*/);
@Namespace("cv::dnn") public static native @ByVal Net readNetFromTorch(@Str String model);
/** \brief Loads a blob that was serialized as a torch.Tensor object of the Torch7 framework.
* \warning This function has the same limitations as readNetFromTorch().
*/
@Namespace("cv::dnn") public static native @ByVal Mat readTorchBlob(@Str BytePointer filename, @Cast("bool") boolean isBinary/*=true*/);
@Namespace("cv::dnn") public static native @ByVal Mat readTorchBlob(@Str BytePointer filename);
@Namespace("cv::dnn") public static native @ByVal Mat readTorchBlob(@Str String filename, @Cast("bool") boolean isBinary/*=true*/);
@Namespace("cv::dnn") public static native @ByVal Mat readTorchBlob(@Str String filename);
/** \brief Creates a 4-dimensional blob from an image. Optionally resizes and crops \p image from the center,
 *  subtracts \p mean values, scales values by \p scalefactor, and swaps Blue and Red channels.
 *  @param image input image (with 1, 3 or 4 channels).
 *  @param size spatial size for the output image
 *  @param mean scalar with mean values which are subtracted from channels. Values are intended
 *  to be in (mean-R, mean-G, mean-B) order if \p image has BGR ordering and \p swapRB is true.
 *  @param scalefactor multiplier for \p image values.
 *  @param swapRB flag which indicates that the first and last channels
 *  of a 3-channel image should be swapped.
 *  @param crop flag which indicates whether the image will be cropped after resize or not
 *  \details If \p crop is true, the input image is resized so that one side after resize matches the corresponding
 *  dimension in \p size and the other one is equal or larger. Then a crop from the center is performed.
 *  If \p crop is false, a direct resize without cropping and without preserving the aspect ratio is performed.
 *  @return 4-dimensional Mat with NCHW dimensions order.
 */
@Namespace("cv::dnn") public static native @ByVal Mat blobFromImage(@ByVal Mat image, double scalefactor/*=1.0*/, @Const @ByRef(nullValue = "cv::Size()") Size size,
@Const @ByRef(nullValue = "cv::Scalar()") Scalar mean, @Cast("bool") boolean swapRB/*=true*/, @Cast("bool") boolean crop/*=true*/);
@Namespace("cv::dnn") public static native @ByVal Mat blobFromImage(@ByVal Mat image);
@Namespace("cv::dnn") public static native @ByVal Mat blobFromImage(@ByVal UMat image, double scalefactor/*=1.0*/, @Const @ByRef(nullValue = "cv::Size()") Size size,
@Const @ByRef(nullValue = "cv::Scalar()") Scalar mean, @Cast("bool") boolean swapRB/*=true*/, @Cast("bool") boolean crop/*=true*/);
@Namespace("cv::dnn") public static native @ByVal Mat blobFromImage(@ByVal UMat image);
@Namespace("cv::dnn") public static native @ByVal Mat blobFromImage(@ByVal GpuMat image, double scalefactor/*=1.0*/, @Const @ByRef(nullValue = "cv::Size()") Size size,
@Const @ByRef(nullValue = "cv::Scalar()") Scalar mean, @Cast("bool") boolean swapRB/*=true*/, @Cast("bool") boolean crop/*=true*/);
@Namespace("cv::dnn") public static native @ByVal Mat blobFromImage(@ByVal GpuMat image);
/** \brief Creates a 4-dimensional blob from a series of images. Optionally resizes and
 *  crops \p images from the center, subtracts \p mean values, scales values by \p scalefactor,
 *  and swaps Blue and Red channels.
 *  @param images input images (all with 1, 3 or 4 channels).
 *  @param size spatial size for the output image
 *  @param mean scalar with mean values which are subtracted from channels. Values are intended
 *  to be in (mean-R, mean-G, mean-B) order if \p image has BGR ordering and \p swapRB is true.
 *  @param scalefactor multiplier for \p images values.
 *  @param swapRB flag which indicates that the first and last channels
 *  of a 3-channel image should be swapped.
 *  @param crop flag which indicates whether the image will be cropped after resize or not
 *  \details If \p crop is true, the input image is resized so that one side after resize matches the corresponding
 *  dimension in \p size and the other one is equal or larger. Then a crop from the center is performed.
 *  If \p crop is false, a direct resize without cropping and without preserving the aspect ratio is performed.
 *  @return 4-dimensional Mat with NCHW dimensions order.
 */
@Namespace("cv::dnn") public static native @ByVal Mat blobFromImages(@Const @ByRef MatVector images, double scalefactor/*=1.0*/,
@ByVal(nullValue = "cv::Size()") Size size, @Const @ByRef(nullValue = "cv::Scalar()") Scalar mean, @Cast("bool") boolean swapRB/*=true*/, @Cast("bool") boolean crop/*=true*/);
@Namespace("cv::dnn") public static native @ByVal Mat blobFromImages(@Const @ByRef MatVector images);
/** \brief Converts all weights of a Caffe network to half-precision floating point.
 *  @param src Path to the original Caffe model containing single-precision
 *  floating-point weights (usually with the {@code .caffemodel} extension).
 *  @param dst Path to the destination model with updated weights.
 *  @param layersTypes Set of layer types whose parameters will be converted.
 *  By default, converts only the weights of Convolutional and Fully-Connected layers.
 *
 *  \note The shrunk model has no original float32 weights, so it can't be used
 *  in the original Caffe framework anymore. However, the structure of the data
 *  is taken from NVIDIA's Caffe fork: https://github.com/NVIDIA/caffe,
 *  so the resulting model may be used there.
 */
@Namespace("cv::dnn") public static native void shrinkCaffeModel(@Str BytePointer src, @Str BytePointer dst,
@Const @ByRef(nullValue = "std::vector()") StringVector layersTypes);
@Namespace("cv::dnn") public static native void shrinkCaffeModel(@Str BytePointer src, @Str BytePointer dst);
@Namespace("cv::dnn") public static native void shrinkCaffeModel(@Str String src, @Str String dst,
@Const @ByRef(nullValue = "std::vector()") StringVector layersTypes);
@Namespace("cv::dnn") public static native void shrinkCaffeModel(@Str String src, @Str String dst);
/** \brief Performs non-maximum suppression given boxes and corresponding scores.
 *  @param bboxes a set of bounding boxes to apply NMS to.
 *  @param scores a set of corresponding confidences.
 *  @param score_threshold a threshold used to filter boxes by score.
 *  @param nms_threshold a threshold used in non-maximum suppression.
 *  @param indices the kept indices of bboxes after NMS.
 *  @param eta a coefficient in the adaptive threshold formula: \f$nms\_threshold_{i+1}=eta\cdot nms\_threshold_i\f$.
 *  @param top_k if {@code >0}, keep at most \p top_k picked indices.
 */
@Namespace("cv::dnn") public static native void NMSBoxes(@Const @ByRef RectVector bboxes, @StdVector FloatPointer scores,
float score_threshold, float nms_threshold,
@StdVector IntPointer indices,
float eta/*=1.f*/, int top_k/*=0*/);
@Namespace("cv::dnn") public static native void NMSBoxes(@Const @ByRef RectVector bboxes, @StdVector FloatPointer scores,
float score_threshold, float nms_threshold,
@StdVector IntPointer indices);
@Namespace("cv::dnn") public static native void NMSBoxes(@Const @ByRef RectVector bboxes, @StdVector FloatBuffer scores,
float score_threshold, float nms_threshold,
@StdVector IntBuffer indices,
float eta/*=1.f*/, int top_k/*=0*/);
@Namespace("cv::dnn") public static native void NMSBoxes(@Const @ByRef RectVector bboxes, @StdVector FloatBuffer scores,
float score_threshold, float nms_threshold,
@StdVector IntBuffer indices);
@Namespace("cv::dnn") public static native void NMSBoxes(@Const @ByRef RectVector bboxes, @StdVector float[] scores,
float score_threshold, float nms_threshold,
@StdVector int[] indices,
float eta/*=1.f*/, int top_k/*=0*/);
@Namespace("cv::dnn") public static native void NMSBoxes(@Const @ByRef RectVector bboxes, @StdVector float[] scores,
float score_threshold, float nms_threshold,
@StdVector int[] indices);
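// NMS sketch with illustrative boxes, scores, and thresholds; in practice these come
// from a detector's output:
//
//   RectVector boxes = new RectVector(new Rect(0, 0, 10, 10), new Rect(1, 1, 10, 10));
//   FloatPointer scores = new FloatPointer(0.9f, 0.8f);
//   IntPointer keep = new IntPointer();
//   NMSBoxes(boxes, scores, 0.5f, 0.4f, keep);   // keep then holds the surviving indices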
/** \} */
// #include
// #include
// #endif /* OPENCV_DNN_DNN_HPP */
// Parsed from
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
// #ifndef OPENCV_DNN_LAYER_HPP
// #define OPENCV_DNN_LAYER_HPP
// #include
/** \addtogroup dnn
 *  \{
 *
 * \defgroup dnnLayerFactory Utilities for New Layers Registration
 *  \{
 */
/** \brief %Layer factory allows creating instances of registered layers. */
@Namespace("cv::dnn") public static class LayerFactory extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public LayerFactory(Pointer p) { super(p); }
/** Each Layer class must provide this function to the factory */
@Convention(value="", extern="C++") public static class Constuctor extends FunctionPointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public Constuctor(Pointer p) { super(p); }
protected Constuctor() { allocate(); }
private native void allocate();
public native @Ptr Layer call(@ByRef LayerParams params);
}
/** Registers the layer class with typename \p type and specified \p constructor. Thread-safe. */
public static native void registerLayer(@Str BytePointer type, Constuctor constructor);
public static native void registerLayer(@Str String type, Constuctor constructor);
/** Unregisters registered layer with specified type name. Thread-safe. */
public static native void unregisterLayer(@Str BytePointer type);
public static native void unregisterLayer(@Str String type);
/** \brief Creates an instance of a registered layer.
 *  @param type type name of the layer being created.
 *  @param params parameters which will be used for layer initialization.
 *  \note Thread-safe.
 */
public static native @Ptr Layer createLayerInstance(@Str BytePointer type, @ByRef LayerParams params);
public static native @Ptr Layer createLayerInstance(@Str String type, @ByRef LayerParams params);
}
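// Registration sketch; "MyLayer" and myConstructor are hypothetical. A Constuctor
// subclass (spelling as in the upstream API) would override call() to build the layer
// from its params:
//
//   LayerFactory.registerLayer("MyLayer", myConstructor);
//   Layer layer = LayerFactory.createLayerInstance("MyLayer", params);
//   LayerFactory.unregisterLayer("MyLayer");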
/** \}
* \} */
// #endif
// Parsed from
// #ifndef OPENCV_DNN_DNN_SHAPE_UTILS_HPP
// #define OPENCV_DNN_DNN_SHAPE_UTILS_HPP
// #include
// #include
// #include
//Useful shortcut
@Namespace("cv::dnn") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer s, @ByRef Range r);
//Slicing
@Namespace("cv::dnn") public static class _Range extends Range {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public _Range(Pointer p) { super(p); }
public _Range(@Const @ByRef Range r) { super((Pointer)null); allocate(r); }
private native void allocate(@Const @ByRef Range r);
public _Range(int start_, int size_/*=1*/) { super((Pointer)null); allocate(start_, size_); }
private native void allocate(int start_, int size_/*=1*/);
public _Range(int start_) { super((Pointer)null); allocate(start_); }
private native void allocate(int start_);
}
@Namespace("cv::dnn") public static native @ByVal Mat slice(@Const @ByRef Mat m, @Const @ByRef _Range r0);
@Namespace("cv::dnn") public static native @ByVal Mat slice(@Const @ByRef Mat m, @Const @ByRef _Range r0, @Const @ByRef _Range r1);
@Namespace("cv::dnn") public static native @ByVal Mat slice(@Const @ByRef Mat m, @Const @ByRef _Range r0, @Const @ByRef _Range r1, @Const @ByRef _Range r2);
@Namespace("cv::dnn") public static native @ByVal Mat slice(@Const @ByRef Mat m, @Const @ByRef _Range r0, @Const @ByRef _Range r1, @Const @ByRef _Range r2, @Const @ByRef _Range r3);
@Namespace("cv::dnn") public static native @ByVal Mat getPlane(@Const @ByRef Mat m, int n, int cn);
@Namespace("cv::dnn") public static native @StdVector @ByVal IntPointer shape(@Const IntPointer dims, int n/*=4*/);
@Namespace("cv::dnn") public static native @StdVector @ByVal IntPointer shape(@Const IntPointer dims);
@Namespace("cv::dnn") public static native @StdVector @ByVal IntPointer shape(@Const IntBuffer dims, int n/*=4*/);
@Namespace("cv::dnn") public static native @StdVector @ByVal IntPointer shape(@Const IntBuffer dims);
@Namespace("cv::dnn") public static native @StdVector @ByVal IntPointer shape(@Const int[] dims, int n/*=4*/);
@Namespace("cv::dnn") public static native @StdVector @ByVal IntPointer shape(@Const int[] dims);
@Namespace("cv::dnn") public static native @StdVector @ByVal IntPointer shape(@Const @ByRef Mat mat);
@Namespace("cv::dnn") public static native @StdVector @ByVal IntPointer shape(@Const @ByRef UMat mat);
public static native @Cast("bool") @Namespace("cv::dnn") boolean is_neg(int i);
@Namespace("cv::dnn") public static native @StdVector @ByVal IntPointer shape(int a0, int a1/*=-1*/, int a2/*=-1*/, int a3/*=-1*/);
@Namespace("cv::dnn") public static native @StdVector @ByVal IntPointer shape(int a0);
@Namespace("cv::dnn") public static native int total(@Const @StdVector @ByRef IntPointer shape, int start/*=-1*/, int end/*=-1*/);
@Namespace("cv::dnn") public static native int total(@Const @StdVector @ByRef IntPointer shape);
@Namespace("cv::dnn") public static native @StdVector @ByVal IntPointer concat(@Const @StdVector @ByRef IntPointer a, @Const @StdVector @ByRef IntPointer b);
@Namespace("cv::dnn") public static native void print(@Const @StdVector @ByRef IntPointer shape, @Str BytePointer name/*=""*/);
@Namespace("cv::dnn") public static native void print(@Const @StdVector @ByRef IntPointer shape);
@Namespace("cv::dnn") public static native void print(@Const @StdVector @ByRef IntPointer shape, @Str String name/*=""*/);
@Namespace("cv::dnn") public static native int clamp(int ax, int dims);
@Namespace("cv::dnn") public static native int clamp(int ax, @Const @StdVector @ByRef IntPointer shape);
@Namespace("cv::dnn") public static native @ByVal Range clamp(@Const @ByRef Range r, int axisSize);
// #endif
}