// Source listing: org.opencv.core.Core (OpenCV 2.4.8 Java bindings),
// retrieved from the official Maven repository.


//
// This file is auto-generated. Please don't modify it!
//
package org.opencv.core;

import java.lang.String;
import java.util.ArrayList;
import java.util.List;
import org.opencv.utils.Converters;

public class Core {

    // these constants are wrapped inside functions to prevent inlining
    private static String getVersion() { return "2.4.8.0"; }
    private static String getNativeLibraryName() { return "opencv_java248"; }
    private static int getVersionEpoch() { return 2; }
    private static int getVersionMajor() { return 4; }
    private static int getVersionMinor() { return 8; }
    private static int getVersionRevision() { return 0; }

    public static final String VERSION = getVersion();
    public static final String NATIVE_LIBRARY_NAME = getNativeLibraryName();
    public static final int VERSION_EPOCH = getVersionEpoch();
    public static final int VERSION_MAJOR = getVersionMajor();
    public static final int VERSION_MINOR = getVersionMinor();
    public static final int VERSION_REVISION = getVersionRevision();

    private static final int
            CV_8U = 0,
            CV_8S = 1,
            CV_16U = 2,
            CV_16S = 3,
            CV_32S = 4,
            CV_32F = 5,
            CV_64F = 6,
            CV_USRTYPE1 = 7;


    public static final int
            SVD_MODIFY_A = 1,
            SVD_NO_UV = 2,
            SVD_FULL_UV = 4,
            FILLED = -1,
            LINE_AA = 16,
            LINE_8 = 8,
            LINE_4 = 4,
            REDUCE_SUM = 0,
            REDUCE_AVG = 1,
            REDUCE_MAX = 2,
            REDUCE_MIN = 3,
            DECOMP_LU = 0,
            DECOMP_SVD = 1,
            DECOMP_EIG = 2,
            DECOMP_CHOLESKY = 3,
            DECOMP_QR = 4,
            DECOMP_NORMAL = 16,
            NORM_INF = 1,
            NORM_L1 = 2,
            NORM_L2 = 4,
            NORM_L2SQR = 5,
            NORM_HAMMING = 6,
            NORM_HAMMING2 = 7,
            NORM_TYPE_MASK = 7,
            NORM_RELATIVE = 8,
            NORM_MINMAX = 32,
            CMP_EQ = 0,
            CMP_GT = 1,
            CMP_GE = 2,
            CMP_LT = 3,
            CMP_LE = 4,
            CMP_NE = 5,
            GEMM_1_T = 1,
            GEMM_2_T = 2,
            GEMM_3_T = 4,
            DFT_INVERSE = 1,
            DFT_SCALE = 2,
            DFT_ROWS = 4,
            DFT_COMPLEX_OUTPUT = 16,
            DFT_REAL_OUTPUT = 32,
            DCT_INVERSE = DFT_INVERSE,
            DCT_ROWS = DFT_ROWS,
            DEPTH_MASK_8U = 1 << CV_8U,
            DEPTH_MASK_8S = 1 << CV_8S,
            DEPTH_MASK_16U = 1 << CV_16U,
            DEPTH_MASK_16S = 1 << CV_16S,
            DEPTH_MASK_32S = 1 << CV_32S,
            DEPTH_MASK_32F = 1 << CV_32F,
            DEPTH_MASK_64F = 1 << CV_64F,
            DEPTH_MASK_ALL = (DEPTH_MASK_64F<<1)-1,
            DEPTH_MASK_ALL_BUT_8S = DEPTH_MASK_ALL & ~DEPTH_MASK_8S,
            DEPTH_MASK_FLT = DEPTH_MASK_32F + DEPTH_MASK_64F,
            MAGIC_MASK = 0xFFFF0000,
            TYPE_MASK = 0x00000FFF,
            DEPTH_MASK = 7,
            SORT_EVERY_ROW = 0,
            SORT_EVERY_COLUMN = 1,
            SORT_ASCENDING = 0,
            SORT_DESCENDING = 16,
            COVAR_SCRAMBLED = 0,
            COVAR_NORMAL = 1,
            COVAR_USE_AVG = 2,
            COVAR_SCALE = 4,
            COVAR_ROWS = 8,
            COVAR_COLS = 16,
            KMEANS_RANDOM_CENTERS = 0,
            KMEANS_PP_CENTERS = 2,
            KMEANS_USE_INITIAL_LABELS = 1,
            FONT_HERSHEY_SIMPLEX = 0,
            FONT_HERSHEY_PLAIN = 1,
            FONT_HERSHEY_DUPLEX = 2,
            FONT_HERSHEY_COMPLEX = 3,
            FONT_HERSHEY_TRIPLEX = 4,
            FONT_HERSHEY_COMPLEX_SMALL = 5,
            FONT_HERSHEY_SCRIPT_SIMPLEX = 6,
            FONT_HERSHEY_SCRIPT_COMPLEX = 7,
            FONT_ITALIC = 16;


    //
    // C++:  void LUT(Mat src, Mat lut, Mat& dst, int interpolation = 0)
    //

/**
 * 

Performs a look-up table transform of an array.

* *

The function LUT fills the output array with values from the * look-up table. Indices of the entries are taken from the input array. That * is, the function processes each element of src as follows:

* *

dst(I) <- lut(src(I) + d)

* *

where

* *

d = 0 if src has depth CV_8U; 128 if src has depth CV_8S

* * @param src input array of 8-bit elements. * @param lut look-up table of 256 elements; in case of multi-channel input * array, the table should either have a single channel (in this case the same * table is used for all channels) or the same number of channels as in the * input array. * @param dst output array of the same size and number of channels as * src, and the same depth as lut. * @param interpolation a interpolation * * @see org.opencv.core.Core.LUT * @see org.opencv.core.Mat#convertTo * @see org.opencv.core.Core#convertScaleAbs */ public static void LUT(Mat src, Mat lut, Mat dst, int interpolation) { LUT_0(src.nativeObj, lut.nativeObj, dst.nativeObj, interpolation); return; } /** *

Performs a look-up table transform of an array.

* *

The function LUT fills the output array with values from the * look-up table. Indices of the entries are taken from the input array. That * is, the function processes each element of src as follows:

* *

dst(I) <- lut(src(I) + d)

* *

where

* *

d = 0 if src has depth CV_8U; 128 if src has depth CV_8S

* * @param src input array of 8-bit elements. * @param lut look-up table of 256 elements; in case of multi-channel input * array, the table should either have a single channel (in this case the same * table is used for all channels) or the same number of channels as in the * input array. * @param dst output array of the same size and number of channels as * src, and the same depth as lut. * * @see org.opencv.core.Core.LUT * @see org.opencv.core.Mat#convertTo * @see org.opencv.core.Core#convertScaleAbs */ public static void LUT(Mat src, Mat lut, Mat dst) { LUT_1(src.nativeObj, lut.nativeObj, dst.nativeObj); return; } // // C++: double Mahalanobis(Mat v1, Mat v2, Mat icovar) // /** *

Calculates the Mahalanobis distance between two vectors.

* *

The function Mahalanobis calculates and returns the weighted * distance between two vectors:

* *

d(vec1, vec2)= sqrt(sum_(i,j)(icovar(i,j)*(vec1(I)-vec2(I))*(vec1(j)-vec2(j))))

* *

The covariance matrix may be calculated using the "calcCovarMatrix" function * and then inverted using the "invert" function (preferably using the * DECOMP_SVD method, as the most accurate).

* * @param v1 a v1 * @param v2 a v2 * @param icovar inverse covariance matrix. * * @see org.opencv.core.Core.Mahalanobis */ public static double Mahalanobis(Mat v1, Mat v2, Mat icovar) { double retVal = Mahalanobis_0(v1.nativeObj, v2.nativeObj, icovar.nativeObj); return retVal; } // // C++: void PCABackProject(Mat data, Mat mean, Mat eigenvectors, Mat& result) // public static void PCABackProject(Mat data, Mat mean, Mat eigenvectors, Mat result) { PCABackProject_0(data.nativeObj, mean.nativeObj, eigenvectors.nativeObj, result.nativeObj); return; } // // C++: void PCACompute(Mat data, Mat& mean, Mat& eigenvectors, int maxComponents = 0) // public static void PCACompute(Mat data, Mat mean, Mat eigenvectors, int maxComponents) { PCACompute_0(data.nativeObj, mean.nativeObj, eigenvectors.nativeObj, maxComponents); return; } public static void PCACompute(Mat data, Mat mean, Mat eigenvectors) { PCACompute_1(data.nativeObj, mean.nativeObj, eigenvectors.nativeObj); return; } // // C++: void PCAComputeVar(Mat data, Mat& mean, Mat& eigenvectors, double retainedVariance) // public static void PCAComputeVar(Mat data, Mat mean, Mat eigenvectors, double retainedVariance) { PCAComputeVar_0(data.nativeObj, mean.nativeObj, eigenvectors.nativeObj, retainedVariance); return; } // // C++: void PCAProject(Mat data, Mat mean, Mat eigenvectors, Mat& result) // public static void PCAProject(Mat data, Mat mean, Mat eigenvectors, Mat result) { PCAProject_0(data.nativeObj, mean.nativeObj, eigenvectors.nativeObj, result.nativeObj); return; } // // C++: void SVBackSubst(Mat w, Mat u, Mat vt, Mat rhs, Mat& dst) // public static void SVBackSubst(Mat w, Mat u, Mat vt, Mat rhs, Mat dst) { SVBackSubst_0(w.nativeObj, u.nativeObj, vt.nativeObj, rhs.nativeObj, dst.nativeObj); return; } // // C++: void SVDecomp(Mat src, Mat& w, Mat& u, Mat& vt, int flags = 0) // public static void SVDecomp(Mat src, Mat w, Mat u, Mat vt, int flags) { SVDecomp_0(src.nativeObj, w.nativeObj, u.nativeObj, vt.nativeObj, 
flags); return; } public static void SVDecomp(Mat src, Mat w, Mat u, Mat vt) { SVDecomp_1(src.nativeObj, w.nativeObj, u.nativeObj, vt.nativeObj); return; } // // C++: void absdiff(Mat src1, Mat src2, Mat& dst) // /** *

Calculates the per-element absolute difference between two arrays or between * an array and a scalar.

* *

The function absdiff calculates:

*
    *
  • Absolute difference between two arrays when they have the same size * and type: *
* *

dst(I) = saturate(| src1(I) - src2(I)|)

* *
    *
  • Absolute difference between an array and a scalar when the second * array is constructed from Scalar or has as many elements as the * number of channels in src1: *
* *

dst(I) = saturate(| src1(I) - src2|)

* *
    *
  • Absolute difference between a scalar and an array when the first array * is constructed from Scalar or has as many elements as the number * of channels in src2: *
* *

dst(I) = saturate(| src1 - src2(I)|)

* *

where I is a multi-dimensional index of array elements. In case * of multi-channel arrays, each channel is processed independently.

* *

Note: Saturation is not applied when the arrays have the depth * CV_32S. You may even get a negative value in the case of * overflow.

* * @param src1 first input array or a scalar. * @param src2 second input array or a scalar. * @param dst output array that has the same size and type as input arrays. * * @see org.opencv.core.Core.absdiff */ public static void absdiff(Mat src1, Mat src2, Mat dst) { absdiff_0(src1.nativeObj, src2.nativeObj, dst.nativeObj); return; } // // C++: void absdiff(Mat src1, Scalar src2, Mat& dst) // /** *

Calculates the per-element absolute difference between two arrays or between * an array and a scalar.

* *

The function absdiff calculates:

*
    *
  • Absolute difference between two arrays when they have the same size * and type: *
* *

dst(I) = saturate(| src1(I) - src2(I)|)

* *
    *
  • Absolute difference between an array and a scalar when the second * array is constructed from Scalar or has as many elements as the * number of channels in src1: *
* *

dst(I) = saturate(| src1(I) - src2|)

* *
    *
  • Absolute difference between a scalar and an array when the first array * is constructed from Scalar or has as many elements as the number * of channels in src2: *
* *

dst(I) = saturate(| src1 - src2(I)|)

* *

where I is a multi-dimensional index of array elements. In case * of multi-channel arrays, each channel is processed independently.

* *

Note: Saturation is not applied when the arrays have the depth * CV_32S. You may even get a negative value in the case of * overflow.

* * @param src1 first input array or a scalar. * @param src2 second input array or a scalar. * @param dst output array that has the same size and type as input arrays. * * @see org.opencv.core.Core.absdiff */ public static void absdiff(Mat src1, Scalar src2, Mat dst) { absdiff_1(src1.nativeObj, src2.val[0], src2.val[1], src2.val[2], src2.val[3], dst.nativeObj); return; } // // C++: void add(Mat src1, Mat src2, Mat& dst, Mat mask = Mat(), int dtype = -1) // /** *

Calculates the per-element sum of two arrays or an array and a scalar.

* *

The function add calculates:

*
    *
  • Sum of two arrays when both input arrays have the same size and the * same number of channels: *
* *

dst(I) = saturate(src1(I) + src2(I)) if mask(I) != 0

* *
    *
  • Sum of an array and a scalar when src2 is constructed * from Scalar or has the same number of elements as * src1.channels(): *
* *

dst(I) = saturate(src1(I) + src2) if mask(I) != 0

* *
    *
  • Sum of a scalar and an array when src1 is constructed * from Scalar or has the same number of elements as * src2.channels(): *
* *

dst(I) = saturate(src1 + src2(I)) if mask(I) != 0

* *

where I is a multi-dimensional index of array elements. In case * of multi-channel arrays, each channel is processed independently. * The first function in the list above can be replaced with matrix expressions: *

* *

// C++ code:

* *

dst = src1 + src2;

* *

dst += src1; // equivalent to add(dst, src1, dst);

* *

The input arrays and the output array can all have the same or different * depths. For example, you can add a 16-bit unsigned array to a 8-bit signed * array and store the sum as a 32-bit floating-point array. Depth of the output * array is determined by the dtype parameter. In the second and * third cases above, as well as in the first case, when src1.depth() == * src2.depth(), dtype can be set to the default * -1. In this case, the output array will have the same depth as * the input array, be it src1, src2 or both. *

* *

Note: Saturation is not applied when the output array has the depth * CV_32S. You may even get result of an incorrect sign in the case * of overflow.

* * @param src1 first input array or a scalar. * @param src2 second input array or a scalar. * @param dst output array that has the same size and number of channels as the * input array(s); the depth is defined by dtype or * src1/src2. * @param mask optional operation mask - 8-bit single channel array, that * specifies elements of the output array to be changed. * @param dtype optional depth of the output array (see the discussion below). * * @see org.opencv.core.Core.add * @see org.opencv.core.Core#addWeighted * @see org.opencv.core.Mat#convertTo * @see org.opencv.core.Core#scaleAdd * @see org.opencv.core.Core#subtract */ public static void add(Mat src1, Mat src2, Mat dst, Mat mask, int dtype) { add_0(src1.nativeObj, src2.nativeObj, dst.nativeObj, mask.nativeObj, dtype); return; } /** *

Calculates the per-element sum of two arrays or an array and a scalar.

* *

The function add calculates:

*
    *
  • Sum of two arrays when both input arrays have the same size and the * same number of channels: *
* *

dst(I) = saturate(src1(I) + src2(I)) if mask(I) != 0

* *
    *
  • Sum of an array and a scalar when src2 is constructed * from Scalar or has the same number of elements as * src1.channels(): *
* *

dst(I) = saturate(src1(I) + src2) if mask(I) != 0

* *
    *
  • Sum of a scalar and an array when src1 is constructed * from Scalar or has the same number of elements as * src2.channels(): *
* *

dst(I) = saturate(src1 + src2(I)) if mask(I) != 0

* *

where I is a multi-dimensional index of array elements. In case * of multi-channel arrays, each channel is processed independently. * The first function in the list above can be replaced with matrix expressions: *

* *

// C++ code:

* *

dst = src1 + src2;

* *

dst += src1; // equivalent to add(dst, src1, dst);

* *

The input arrays and the output array can all have the same or different * depths. For example, you can add a 16-bit unsigned array to a 8-bit signed * array and store the sum as a 32-bit floating-point array. Depth of the output * array is determined by the dtype parameter. In the second and * third cases above, as well as in the first case, when src1.depth() == * src2.depth(), dtype can be set to the default * -1. In this case, the output array will have the same depth as * the input array, be it src1, src2 or both. *

* *

Note: Saturation is not applied when the output array has the depth * CV_32S. You may even get result of an incorrect sign in the case * of overflow.

* * @param src1 first input array or a scalar. * @param src2 second input array or a scalar. * @param dst output array that has the same size and number of channels as the * input array(s); the depth is defined by dtype or * src1/src2. * @param mask optional operation mask - 8-bit single channel array, that * specifies elements of the output array to be changed. * * @see org.opencv.core.Core.add * @see org.opencv.core.Core#addWeighted * @see org.opencv.core.Mat#convertTo * @see org.opencv.core.Core#scaleAdd * @see org.opencv.core.Core#subtract */ public static void add(Mat src1, Mat src2, Mat dst, Mat mask) { add_1(src1.nativeObj, src2.nativeObj, dst.nativeObj, mask.nativeObj); return; } /** *

Calculates the per-element sum of two arrays or an array and a scalar.

* *

The function add calculates:

*
    *
  • Sum of two arrays when both input arrays have the same size and the * same number of channels: *
* *

dst(I) = saturate(src1(I) + src2(I)) if mask(I) != 0

* *
    *
  • Sum of an array and a scalar when src2 is constructed * from Scalar or has the same number of elements as * src1.channels(): *
* *

dst(I) = saturate(src1(I) + src2) if mask(I) != 0

* *
    *
  • Sum of a scalar and an array when src1 is constructed * from Scalar or has the same number of elements as * src2.channels(): *
* *

dst(I) = saturate(src1 + src2(I)) if mask(I) != 0

* *

where I is a multi-dimensional index of array elements. In case * of multi-channel arrays, each channel is processed independently. * The first function in the list above can be replaced with matrix expressions: *

* *

// C++ code:

* *

dst = src1 + src2;

* *

dst += src1; // equivalent to add(dst, src1, dst);

* *

The input arrays and the output array can all have the same or different * depths. For example, you can add a 16-bit unsigned array to a 8-bit signed * array and store the sum as a 32-bit floating-point array. Depth of the output * array is determined by the dtype parameter. In the second and * third cases above, as well as in the first case, when src1.depth() == * src2.depth(), dtype can be set to the default * -1. In this case, the output array will have the same depth as * the input array, be it src1, src2 or both. *

* *

Note: Saturation is not applied when the output array has the depth * CV_32S. You may even get result of an incorrect sign in the case * of overflow.

* * @param src1 first input array or a scalar. * @param src2 second input array or a scalar. * @param dst output array that has the same size and number of channels as the * input array(s); the depth is defined by dtype or * src1/src2. * * @see org.opencv.core.Core.add * @see org.opencv.core.Core#addWeighted * @see org.opencv.core.Mat#convertTo * @see org.opencv.core.Core#scaleAdd * @see org.opencv.core.Core#subtract */ public static void add(Mat src1, Mat src2, Mat dst) { add_2(src1.nativeObj, src2.nativeObj, dst.nativeObj); return; } // // C++: void add(Mat src1, Scalar src2, Mat& dst, Mat mask = Mat(), int dtype = -1) // /** *

Calculates the per-element sum of two arrays or an array and a scalar.

* *

The function add calculates:

*
    *
  • Sum of two arrays when both input arrays have the same size and the * same number of channels: *
* *

dst(I) = saturate(src1(I) + src2(I)) if mask(I) != 0

* *
    *
  • Sum of an array and a scalar when src2 is constructed * from Scalar or has the same number of elements as * src1.channels(): *
* *

dst(I) = saturate(src1(I) + src2) if mask(I) != 0

* *
    *
  • Sum of a scalar and an array when src1 is constructed * from Scalar or has the same number of elements as * src2.channels(): *
* *

dst(I) = saturate(src1 + src2(I)) if mask(I) != 0

* *

where I is a multi-dimensional index of array elements. In case * of multi-channel arrays, each channel is processed independently. * The first function in the list above can be replaced with matrix expressions: *

* *

// C++ code:

* *

dst = src1 + src2;

* *

dst += src1; // equivalent to add(dst, src1, dst);

* *

The input arrays and the output array can all have the same or different * depths. For example, you can add a 16-bit unsigned array to a 8-bit signed * array and store the sum as a 32-bit floating-point array. Depth of the output * array is determined by the dtype parameter. In the second and * third cases above, as well as in the first case, when src1.depth() == * src2.depth(), dtype can be set to the default * -1. In this case, the output array will have the same depth as * the input array, be it src1, src2 or both. *

* *

Note: Saturation is not applied when the output array has the depth * CV_32S. You may even get result of an incorrect sign in the case * of overflow.

* * @param src1 first input array or a scalar. * @param src2 second input array or a scalar. * @param dst output array that has the same size and number of channels as the * input array(s); the depth is defined by dtype or * src1/src2. * @param mask optional operation mask - 8-bit single channel array, that * specifies elements of the output array to be changed. * @param dtype optional depth of the output array (see the discussion below). * * @see org.opencv.core.Core.add * @see org.opencv.core.Core#addWeighted * @see org.opencv.core.Mat#convertTo * @see org.opencv.core.Core#scaleAdd * @see org.opencv.core.Core#subtract */ public static void add(Mat src1, Scalar src2, Mat dst, Mat mask, int dtype) { add_3(src1.nativeObj, src2.val[0], src2.val[1], src2.val[2], src2.val[3], dst.nativeObj, mask.nativeObj, dtype); return; } /** *

Calculates the per-element sum of two arrays or an array and a scalar.

* *

The function add calculates:

*
    *
  • Sum of two arrays when both input arrays have the same size and the * same number of channels: *
* *

dst(I) = saturate(src1(I) + src2(I)) if mask(I) != 0

* *
    *
  • Sum of an array and a scalar when src2 is constructed * from Scalar or has the same number of elements as * src1.channels(): *
* *

dst(I) = saturate(src1(I) + src2) if mask(I) != 0

* *
    *
  • Sum of a scalar and an array when src1 is constructed * from Scalar or has the same number of elements as * src2.channels(): *
* *

dst(I) = saturate(src1 + src2(I)) if mask(I) != 0

* *

where I is a multi-dimensional index of array elements. In case * of multi-channel arrays, each channel is processed independently. * The first function in the list above can be replaced with matrix expressions: *

* *

// C++ code:

* *

dst = src1 + src2;

* *

dst += src1; // equivalent to add(dst, src1, dst);

* *

The input arrays and the output array can all have the same or different * depths. For example, you can add a 16-bit unsigned array to a 8-bit signed * array and store the sum as a 32-bit floating-point array. Depth of the output * array is determined by the dtype parameter. In the second and * third cases above, as well as in the first case, when src1.depth() == * src2.depth(), dtype can be set to the default * -1. In this case, the output array will have the same depth as * the input array, be it src1, src2 or both. *

* *

Note: Saturation is not applied when the output array has the depth * CV_32S. You may even get result of an incorrect sign in the case * of overflow.

* * @param src1 first input array or a scalar. * @param src2 second input array or a scalar. * @param dst output array that has the same size and number of channels as the * input array(s); the depth is defined by dtype or * src1/src2. * @param mask optional operation mask - 8-bit single channel array, that * specifies elements of the output array to be changed. * * @see org.opencv.core.Core.add * @see org.opencv.core.Core#addWeighted * @see org.opencv.core.Mat#convertTo * @see org.opencv.core.Core#scaleAdd * @see org.opencv.core.Core#subtract */ public static void add(Mat src1, Scalar src2, Mat dst, Mat mask) { add_4(src1.nativeObj, src2.val[0], src2.val[1], src2.val[2], src2.val[3], dst.nativeObj, mask.nativeObj); return; } /** *

Calculates the per-element sum of two arrays or an array and a scalar.

* *

The function add calculates:

*
    *
  • Sum of two arrays when both input arrays have the same size and the * same number of channels: *
* *

dst(I) = saturate(src1(I) + src2(I)) if mask(I) != 0

* *
    *
  • Sum of an array and a scalar when src2 is constructed * from Scalar or has the same number of elements as * src1.channels(): *
* *

dst(I) = saturate(src1(I) + src2) if mask(I) != 0

* *
    *
  • Sum of a scalar and an array when src1 is constructed * from Scalar or has the same number of elements as * src2.channels(): *
* *

dst(I) = saturate(src1 + src2(I)) if mask(I) != 0

* *

where I is a multi-dimensional index of array elements. In case * of multi-channel arrays, each channel is processed independently. * The first function in the list above can be replaced with matrix expressions: *

* *

// C++ code:

* *

dst = src1 + src2;

* *

dst += src1; // equivalent to add(dst, src1, dst);

* *

The input arrays and the output array can all have the same or different * depths. For example, you can add a 16-bit unsigned array to a 8-bit signed * array and store the sum as a 32-bit floating-point array. Depth of the output * array is determined by the dtype parameter. In the second and * third cases above, as well as in the first case, when src1.depth() == * src2.depth(), dtype can be set to the default * -1. In this case, the output array will have the same depth as * the input array, be it src1, src2 or both. *

* *

Note: Saturation is not applied when the output array has the depth * CV_32S. You may even get result of an incorrect sign in the case * of overflow.

* * @param src1 first input array or a scalar. * @param src2 second input array or a scalar. * @param dst output array that has the same size and number of channels as the * input array(s); the depth is defined by dtype or * src1/src2. * * @see org.opencv.core.Core.add * @see org.opencv.core.Core#addWeighted * @see org.opencv.core.Mat#convertTo * @see org.opencv.core.Core#scaleAdd * @see org.opencv.core.Core#subtract */ public static void add(Mat src1, Scalar src2, Mat dst) { add_5(src1.nativeObj, src2.val[0], src2.val[1], src2.val[2], src2.val[3], dst.nativeObj); return; } // // C++: void addWeighted(Mat src1, double alpha, Mat src2, double beta, double gamma, Mat& dst, int dtype = -1) // /** *

Calculates the weighted sum of two arrays.

* *

The function addWeighted calculates the weighted sum of two * arrays as follows:

* *

dst(I)= saturate(src1(I)* alpha + src2(I)* beta + gamma)

* *

where I is a multi-dimensional index of array elements. In case * of multi-channel arrays, each channel is processed independently. * The function can be replaced with a matrix expression:

* *

// C++ code:

* *

dst = src1*alpha + src2*beta + gamma;

* *

Note: Saturation is not applied when the output array has the depth * CV_32S. You may even get result of an incorrect sign in the case * of overflow. *

* * @param src1 first input array. * @param alpha weight of the first array elements. * @param src2 second input array of the same size and channel number as * src1. * @param beta weight of the second array elements. * @param gamma scalar added to each sum. * @param dst output array that has the same size and number of channels as the * input arrays. * @param dtype optional depth of the output array; when both input arrays have * the same depth, dtype can be set to -1, which will * be equivalent to src1.depth(). * * @see org.opencv.core.Core.addWeighted * @see org.opencv.core.Core#add * @see org.opencv.core.Core#scaleAdd * @see org.opencv.core.Core#subtract * @see org.opencv.core.Mat#convertTo */ public static void addWeighted(Mat src1, double alpha, Mat src2, double beta, double gamma, Mat dst, int dtype) { addWeighted_0(src1.nativeObj, alpha, src2.nativeObj, beta, gamma, dst.nativeObj, dtype); return; } /** *

Calculates the weighted sum of two arrays.

* *

The function addWeighted calculates the weighted sum of two * arrays as follows:

* *

dst(I)= saturate(src1(I)* alpha + src2(I)* beta + gamma)

* *

where I is a multi-dimensional index of array elements. In case * of multi-channel arrays, each channel is processed independently. * The function can be replaced with a matrix expression:

* *

// C++ code:

* *

dst = src1*alpha + src2*beta + gamma;

* *

Note: Saturation is not applied when the output array has the depth * CV_32S. You may even get result of an incorrect sign in the case * of overflow. *

* * @param src1 first input array. * @param alpha weight of the first array elements. * @param src2 second input array of the same size and channel number as * src1. * @param beta weight of the second array elements. * @param gamma scalar added to each sum. * @param dst output array that has the same size and number of channels as the * input arrays. * * @see org.opencv.core.Core.addWeighted * @see org.opencv.core.Core#add * @see org.opencv.core.Core#scaleAdd * @see org.opencv.core.Core#subtract * @see org.opencv.core.Mat#convertTo */ public static void addWeighted(Mat src1, double alpha, Mat src2, double beta, double gamma, Mat dst) { addWeighted_1(src1.nativeObj, alpha, src2.nativeObj, beta, gamma, dst.nativeObj); return; } // // C++: void batchDistance(Mat src1, Mat src2, Mat& dist, int dtype, Mat& nidx, int normType = NORM_L2, int K = 0, Mat mask = Mat(), int update = 0, bool crosscheck = false) // public static void batchDistance(Mat src1, Mat src2, Mat dist, int dtype, Mat nidx, int normType, int K, Mat mask, int update, boolean crosscheck) { batchDistance_0(src1.nativeObj, src2.nativeObj, dist.nativeObj, dtype, nidx.nativeObj, normType, K, mask.nativeObj, update, crosscheck); return; } public static void batchDistance(Mat src1, Mat src2, Mat dist, int dtype, Mat nidx, int normType, int K) { batchDistance_1(src1.nativeObj, src2.nativeObj, dist.nativeObj, dtype, nidx.nativeObj, normType, K); return; } public static void batchDistance(Mat src1, Mat src2, Mat dist, int dtype, Mat nidx) { batchDistance_2(src1.nativeObj, src2.nativeObj, dist.nativeObj, dtype, nidx.nativeObj); return; } // // C++: void bitwise_and(Mat src1, Mat src2, Mat& dst, Mat mask = Mat()) // /** *

Calculates the per-element bit-wise conjunction of two arrays or an array and * a scalar.

* *

The function calculates the per-element bit-wise logical conjunction for:

*
    *
  • Two arrays when src1 and src2 have the same * size: *
* *

dst(I) = src1(I) / src2(I) if mask(I) != 0

* *
    *
  • An array and a scalar when src2 is constructed from * Scalar or has the same number of elements as src1.channels(): *
* *

dst(I) = src1(I) / src2 if mask(I) != 0

* *
    *
  • A scalar and an array when src1 is constructed from * Scalar or has the same number of elements as src2.channels(): *
* *

dst(I) = src1 / src2(I) if mask(I) != 0

* *

In case of floating-point arrays, their machine-specific bit representations * (usually IEEE754-compliant) are used for the operation. In case of * multi-channel arrays, each channel is processed independently. In the second * and third cases above, the scalar is first converted to the array type.

* * @param src1 first input array or a scalar. * @param src2 second input array or a scalar. * @param dst output array that has the same size and type as the input arrays. * @param mask optional operation mask, 8-bit single channel array, that * specifies elements of the output array to be changed. * * @see org.opencv.core.Core.bitwise_and */ public static void bitwise_and(Mat src1, Mat src2, Mat dst, Mat mask) { bitwise_and_0(src1.nativeObj, src2.nativeObj, dst.nativeObj, mask.nativeObj); return; } /** *

Calculates the per-element bit-wise conjunction of two arrays or an array and * a scalar.

* *

The function calculates the per-element bit-wise logical conjunction for:

*
    *
  • Two arrays when src1 and src2 have the same * size: *
* *

dst(I) = src1(I) / src2(I) if mask(I) != 0

* *
    *
  • An array and a scalar when src2 is constructed from * Scalar or has the same number of elements as src1.channels(): *
* *

dst(I) = src1(I) / src2 if mask(I) != 0

* *
    *
  • A scalar and an array when src1 is constructed from * Scalar or has the same number of elements as src2.channels(): *
* *

dst(I) = src1 / src2(I) if mask(I) != 0

* *

In case of floating-point arrays, their machine-specific bit representations * (usually IEEE754-compliant) are used for the operation. In case of * multi-channel arrays, each channel is processed independently. In the second * and third cases above, the scalar is first converted to the array type.

* * @param src1 first input array or a scalar. * @param src2 second input array or a scalar. * @param dst output array that has the same size and type as the input arrays. * * @see org.opencv.core.Core.bitwise_and */ public static void bitwise_and(Mat src1, Mat src2, Mat dst) { bitwise_and_1(src1.nativeObj, src2.nativeObj, dst.nativeObj); return; } // // C++: void bitwise_not(Mat src, Mat& dst, Mat mask = Mat()) // /** *

Inverts every bit of an array.

* *

The function calculates per-element bit-wise inversion of the input array:

* *

dst(I) = !src(I)

* *

In case of a floating-point input array, its machine-specific bit * representation (usually IEEE754-compliant) is used for the operation. In case * of multi-channel arrays, each channel is processed independently.

* * @param src input array. * @param dst output array that has the same size and type as the input array. * @param mask optional operation mask, 8-bit single channel array, that * specifies elements of the output array to be changed. * * @see org.opencv.core.Core.bitwise_not */ public static void bitwise_not(Mat src, Mat dst, Mat mask) { bitwise_not_0(src.nativeObj, dst.nativeObj, mask.nativeObj); return; } /** *

Inverts every bit of an array.

* *

The function calculates per-element bit-wise inversion of the input array:

* *

dst(I) = !src(I)

* *

In case of a floating-point input array, its machine-specific bit * representation (usually IEEE754-compliant) is used for the operation. In case * of multi-channel arrays, each channel is processed independently.

* * @param src input array. * @param dst output array that has the same size and type as the input array. * * @see org.opencv.core.Core.bitwise_not */ public static void bitwise_not(Mat src, Mat dst) { bitwise_not_1(src.nativeObj, dst.nativeObj); return; } // // C++: void bitwise_or(Mat src1, Mat src2, Mat& dst, Mat mask = Mat()) // /** *

Calculates the per-element bit-wise disjunction of two arrays or an array and * a scalar.

* *

The function calculates the per-element bit-wise logical disjunction for:

*
    *
  • Two arrays when src1 and src2 have the same * size: *
* *

dst(I) = src1(I) V src2(I) if mask(I) != 0

* *
    *
  • An array and a scalar when src2 is constructed from * Scalar or has the same number of elements as src1.channels(): *
* *

dst(I) = src1(I) V src2 if mask(I) != 0

* *
    *
  • A scalar and an array when src1 is constructed from * Scalar or has the same number of elements as src2.channels(): *
* *

dst(I) = src1 V src2(I) if mask(I) != 0

* *

In case of floating-point arrays, their machine-specific bit representations * (usually IEEE754-compliant) are used for the operation. In case of * multi-channel arrays, each channel is processed independently. In the second * and third cases above, the scalar is first converted to the array type.

* * @param src1 first input array or a scalar. * @param src2 second input array or a scalar. * @param dst output array that has the same size and type as the input arrays. * @param mask optional operation mask, 8-bit single channel array, that * specifies elements of the output array to be changed. * * @see org.opencv.core.Core.bitwise_or */ public static void bitwise_or(Mat src1, Mat src2, Mat dst, Mat mask) { bitwise_or_0(src1.nativeObj, src2.nativeObj, dst.nativeObj, mask.nativeObj); return; } /** *

Calculates the per-element bit-wise disjunction of two arrays or an array and * a scalar.

* *

The function calculates the per-element bit-wise logical disjunction for:

*
    *
  • Two arrays when src1 and src2 have the same * size: *
* *

dst(I) = src1(I) V src2(I) if mask(I) != 0

* *
    *
  • An array and a scalar when src2 is constructed from * Scalar or has the same number of elements as src1.channels(): *
* *

dst(I) = src1(I) V src2 if mask(I) != 0

* *
    *
  • A scalar and an array when src1 is constructed from * Scalar or has the same number of elements as src2.channels(): *
* *

dst(I) = src1 V src2(I) if mask(I) != 0

* *

In case of floating-point arrays, their machine-specific bit representations * (usually IEEE754-compliant) are used for the operation. In case of * multi-channel arrays, each channel is processed independently. In the second * and third cases above, the scalar is first converted to the array type.

* * @param src1 first input array or a scalar. * @param src2 second input array or a scalar. * @param dst output array that has the same size and type as the input arrays. * * @see org.opencv.core.Core.bitwise_or */ public static void bitwise_or(Mat src1, Mat src2, Mat dst) { bitwise_or_1(src1.nativeObj, src2.nativeObj, dst.nativeObj); return; } // // C++: void bitwise_xor(Mat src1, Mat src2, Mat& dst, Mat mask = Mat()) // /** *

Calculates the per-element bit-wise "exclusive or" operation on two arrays or * an array and a scalar.

* *

The function calculates the per-element bit-wise logical "exclusive-or" * operation for:

*
    *
  • Two arrays when src1 and src2 have the same * size: *
* *

dst(I) = src1(I)(+) src2(I) if mask(I) != 0

* *
    *
  • An array and a scalar when src2 is constructed from * Scalar or has the same number of elements as src1.channels(): *
* *

dst(I) = src1(I)(+) src2 if mask(I) != 0

* *
    *
  • A scalar and an array when src1 is constructed from * Scalar or has the same number of elements as src2.channels(): *
* *

dst(I) = src1(+) src2(I) if mask(I) != 0

* *

In case of floating-point arrays, their machine-specific bit representations * (usually IEEE754-compliant) are used for the operation. In case of * multi-channel arrays, each channel is processed independently. In the 2nd and * 3rd cases above, the scalar is first converted to the array type.

* * @param src1 first input array or a scalar. * @param src2 second input array or a scalar. * @param dst output array that has the same size and type as the input arrays. * @param mask optional operation mask, 8-bit single channel array, that * specifies elements of the output array to be changed. * * @see org.opencv.core.Core.bitwise_xor */ public static void bitwise_xor(Mat src1, Mat src2, Mat dst, Mat mask) { bitwise_xor_0(src1.nativeObj, src2.nativeObj, dst.nativeObj, mask.nativeObj); return; } /** *

Calculates the per-element bit-wise "exclusive or" operation on two arrays or * an array and a scalar.

* *

The function calculates the per-element bit-wise logical "exclusive-or" * operation for:

*
    *
  • Two arrays when src1 and src2 have the same * size: *
* *

dst(I) = src1(I)(+) src2(I) if mask(I) != 0

* *
    *
  • An array and a scalar when src2 is constructed from * Scalar or has the same number of elements as src1.channels(): *
* *

dst(I) = src1(I)(+) src2 if mask(I) != 0

* *
    *
  • A scalar and an array when src1 is constructed from * Scalar or has the same number of elements as src2.channels(): *
* *

dst(I) = src1(+) src2(I) if mask(I) != 0

* *

In case of floating-point arrays, their machine-specific bit representations * (usually IEEE754-compliant) are used for the operation. In case of * multi-channel arrays, each channel is processed independently. In the 2nd and * 3rd cases above, the scalar is first converted to the array type.

* * @param src1 first input array or a scalar. * @param src2 second input array or a scalar. * @param dst output array that has the same size and type as the input arrays. * * @see org.opencv.core.Core.bitwise_xor */ public static void bitwise_xor(Mat src1, Mat src2, Mat dst) { bitwise_xor_1(src1.nativeObj, src2.nativeObj, dst.nativeObj); return; } // // C++: void calcCovarMatrix(Mat samples, Mat& covar, Mat& mean, int flags, int ctype = CV_64F) // /** *

Calculates the covariance matrix of a set of vectors.

* *

The functions calcCovarMatrix calculate the covariance matrix * and, optionally, the mean vector of the set of input vectors.

* * @param samples samples stored either as separate matrices or as rows/columns * of a single matrix. * @param covar output covariance matrix of the type ctype and * square size. * @param mean input or output (depending on the flags) array as the average * value of the input vectors. * @param flags operation flags as a combination of the following values: *
    *
  • CV_COVAR_SCRAMBLED The output covariance matrix is calculated as: *
* *

scale * [ vects [0]- mean, vects [1]- mean,...]^T * [ vects [0]- mean, * vects [1]- mean,...],

* *

The covariance matrix will be nsamples x nsamples. Such an * unusual covariance matrix is used for fast PCA of a set of very large vectors * (see, for example, the EigenFaces technique for face recognition). * Eigenvalues of this "scrambled" matrix match the eigenvalues of the true * covariance matrix. The "true" eigenvectors can be easily calculated from the * eigenvectors of the "scrambled" covariance matrix.

*
    *
  • CV_COVAR_NORMAL The output covariance matrix is calculated as: *
* *

scale * [ vects [0]- mean, vects [1]- mean,...] * [ vects [0]- mean, * vects [1]- mean,...]^T,

* *

covar will be a square matrix of the same size as the total * number of elements in each input vector. One and only one of * CV_COVAR_SCRAMBLED and CV_COVAR_NORMAL must be * specified.

*
    *
  • CV_COVAR_USE_AVG If the flag is specified, the function does not * calculate mean from the input vectors but, instead, uses the * passed mean vector. This is useful if mean has been * pre-calculated or known in advance, or if the covariance matrix is calculated * by parts. In this case, mean is not a mean vector of the input * sub-set of vectors but rather the mean vector of the whole set. *
  • CV_COVAR_SCALE If the flag is specified, the covariance matrix is * scaled. In the "normal" mode, scale is 1./nsamples. * In the "scrambled" mode, scale is the reciprocal of the total * number of elements in each input vector. By default (if the flag is not * specified), the covariance matrix is not scaled (scale=1). *
  • CV_COVAR_ROWS [Only useful in the second variant of the function] If * the flag is specified, all the input vectors are stored as rows of the * samples matrix. mean should be a single-row vector * in this case. *
  • CV_COVAR_COLS [Only useful in the second variant of the function] If * the flag is specified, all the input vectors are stored as columns of the * samples matrix. mean should be a single-column * vector in this case. *
* @param ctype type of the matrixl; it equals 'CV_64F' by default. * * @see org.opencv.core.Core.calcCovarMatrix * @see org.opencv.core.Core#Mahalanobis * @see org.opencv.core.Core#mulTransposed */ public static void calcCovarMatrix(Mat samples, Mat covar, Mat mean, int flags, int ctype) { calcCovarMatrix_0(samples.nativeObj, covar.nativeObj, mean.nativeObj, flags, ctype); return; } /** *

Calculates the covariance matrix of a set of vectors.

* *

The functions calcCovarMatrix calculate the covariance matrix * and, optionally, the mean vector of the set of input vectors.

* * @param samples samples stored either as separate matrices or as rows/columns * of a single matrix. * @param covar output covariance matrix of the type ctype and * square size. * @param mean input or output (depending on the flags) array as the average * value of the input vectors. * @param flags operation flags as a combination of the following values: *
    *
  • CV_COVAR_SCRAMBLED The output covariance matrix is calculated as: *
* *

scale * [ vects [0]- mean, vects [1]- mean,...]^T * [ vects [0]- mean, * vects [1]- mean,...],

* *

The covariance matrix will be nsamples x nsamples. Such an * unusual covariance matrix is used for fast PCA of a set of very large vectors * (see, for example, the EigenFaces technique for face recognition). * Eigenvalues of this "scrambled" matrix match the eigenvalues of the true * covariance matrix. The "true" eigenvectors can be easily calculated from the * eigenvectors of the "scrambled" covariance matrix.

*
    *
  • CV_COVAR_NORMAL The output covariance matrix is calculated as: *
* *

scale * [ vects [0]- mean, vects [1]- mean,...] * [ vects [0]- mean, * vects [1]- mean,...]^T,

* *

covar will be a square matrix of the same size as the total * number of elements in each input vector. One and only one of * CV_COVAR_SCRAMBLED and CV_COVAR_NORMAL must be * specified.

*
    *
  • CV_COVAR_USE_AVG If the flag is specified, the function does not * calculate mean from the input vectors but, instead, uses the * passed mean vector. This is useful if mean has been * pre-calculated or known in advance, or if the covariance matrix is calculated * by parts. In this case, mean is not a mean vector of the input * sub-set of vectors but rather the mean vector of the whole set. *
  • CV_COVAR_SCALE If the flag is specified, the covariance matrix is * scaled. In the "normal" mode, scale is 1./nsamples. * In the "scrambled" mode, scale is the reciprocal of the total * number of elements in each input vector. By default (if the flag is not * specified), the covariance matrix is not scaled (scale=1). *
  • CV_COVAR_ROWS [Only useful in the second variant of the function] If * the flag is specified, all the input vectors are stored as rows of the * samples matrix. mean should be a single-row vector * in this case. *
  • CV_COVAR_COLS [Only useful in the second variant of the function] If * the flag is specified, all the input vectors are stored as columns of the * samples matrix. mean should be a single-column * vector in this case. *
* * @see org.opencv.core.Core.calcCovarMatrix * @see org.opencv.core.Core#Mahalanobis * @see org.opencv.core.Core#mulTransposed */ public static void calcCovarMatrix(Mat samples, Mat covar, Mat mean, int flags) { calcCovarMatrix_1(samples.nativeObj, covar.nativeObj, mean.nativeObj, flags); return; } // // C++: void cartToPolar(Mat x, Mat y, Mat& magnitude, Mat& angle, bool angleInDegrees = false) // /** *

Calculates the magnitude and angle of 2D vectors.

* *

The function cartToPolar calculates either the magnitude, angle, * or both for every 2D vector (x(I),y(I)):

* *

magnitude(I)= sqrt(x(I)^2+y(I)^2), * angle(I)= atan2(y(I), x(I))[ *180 / pi ]

* *

The angles are calculated with accuracy about 0.3 degrees. For the point * (0,0), the angle is set to 0.

* * @param x array of x-coordinates; this must be a single-precision or * double-precision floating-point array. * @param y array of y-coordinates, that must have the same size and same type * as x. * @param magnitude output array of magnitudes of the same size and type as * x. * @param angle output array of angles that has the same size and type as * x; the angles are measured in radians (from 0 to 2*Pi) or in * degrees (0 to 360 degrees). * @param angleInDegrees a flag, indicating whether the angles are measured in * radians (which is by default), or in degrees. * * @see org.opencv.core.Core.cartToPolar * @see org.opencv.imgproc.Imgproc#Scharr * @see org.opencv.imgproc.Imgproc#Sobel */ public static void cartToPolar(Mat x, Mat y, Mat magnitude, Mat angle, boolean angleInDegrees) { cartToPolar_0(x.nativeObj, y.nativeObj, magnitude.nativeObj, angle.nativeObj, angleInDegrees); return; } /** *

Calculates the magnitude and angle of 2D vectors.

* *

The function cartToPolar calculates either the magnitude, angle, * or both for every 2D vector (x(I),y(I)):

* *

magnitude(I)= sqrt(x(I)^2+y(I)^2), * angle(I)= atan2(y(I), x(I))[ *180 / pi ]

* *

The angles are calculated with accuracy about 0.3 degrees. For the point * (0,0), the angle is set to 0.

* * @param x array of x-coordinates; this must be a single-precision or * double-precision floating-point array. * @param y array of y-coordinates, that must have the same size and same type * as x. * @param magnitude output array of magnitudes of the same size and type as * x. * @param angle output array of angles that has the same size and type as * x; the angles are measured in radians (from 0 to 2*Pi) or in * degrees (0 to 360 degrees). * * @see org.opencv.core.Core.cartToPolar * @see org.opencv.imgproc.Imgproc#Scharr * @see org.opencv.imgproc.Imgproc#Sobel */ public static void cartToPolar(Mat x, Mat y, Mat magnitude, Mat angle) { cartToPolar_1(x.nativeObj, y.nativeObj, magnitude.nativeObj, angle.nativeObj); return; } // // C++: bool checkRange(Mat a, bool quiet = true, _hidden_ * pos = 0, double minVal = -DBL_MAX, double maxVal = DBL_MAX) // /** *

Checks every element of an input array for invalid values.

* *

The functions checkRange check that every array element is * neither NaN nor infinite. When minVal < -DBL_MAX and * maxVal < DBL_MAX, the functions also check that each value is * between minVal and maxVal. In case of multi-channel * arrays, each channel is processed independently. * If some values are out of range, position of the first outlier is stored in * pos (when pos != NULL). Then, the functions either * return false (when quiet=true) or throw an exception.

* * @param a input array. * @param quiet a flag, indicating whether the functions quietly return false * when the array elements are out of range or they throw an exception. * @param minVal inclusive lower boundary of valid values range. * @param maxVal exclusive upper boundary of valid values range. * * @see org.opencv.core.Core.checkRange */ public static boolean checkRange(Mat a, boolean quiet, double minVal, double maxVal) { boolean retVal = checkRange_0(a.nativeObj, quiet, minVal, maxVal); return retVal; } /** *

Checks every element of an input array for invalid values.

* *

The functions checkRange check that every array element is * neither NaN nor infinite. When minVal < -DBL_MAX and * maxVal < DBL_MAX, the functions also check that each value is * between minVal and maxVal. In case of multi-channel * arrays, each channel is processed independently. * If some values are out of range, position of the first outlier is stored in * pos (when pos != NULL). Then, the functions either * return false (when quiet=true) or throw an exception.

* * @param a input array. * * @see org.opencv.core.Core.checkRange */ public static boolean checkRange(Mat a) { boolean retVal = checkRange_1(a.nativeObj); return retVal; } // // C++: void circle(Mat& img, Point center, int radius, Scalar color, int thickness = 1, int lineType = 8, int shift = 0) // /** *

Draws a circle.

* *

The function circle draws a simple or filled circle with a given * center and radius.

* * @param img Image where the circle is drawn. * @param center Center of the circle. * @param radius Radius of the circle. * @param color Circle color. * @param thickness Thickness of the circle outline, if positive. Negative * thickness means that a filled circle is to be drawn. * @param lineType Type of the circle boundary. See the "line" description. * @param shift Number of fractional bits in the coordinates of the center and * in the radius value. * * @see org.opencv.core.Core.circle */ public static void circle(Mat img, Point center, int radius, Scalar color, int thickness, int lineType, int shift) { circle_0(img.nativeObj, center.x, center.y, radius, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType, shift); return; } /** *

Draws a circle.

* *

The function circle draws a simple or filled circle with a given * center and radius.

* * @param img Image where the circle is drawn. * @param center Center of the circle. * @param radius Radius of the circle. * @param color Circle color. * @param thickness Thickness of the circle outline, if positive. Negative * thickness means that a filled circle is to be drawn. * * @see org.opencv.core.Core.circle */ public static void circle(Mat img, Point center, int radius, Scalar color, int thickness) { circle_1(img.nativeObj, center.x, center.y, radius, color.val[0], color.val[1], color.val[2], color.val[3], thickness); return; } /** *

Draws a circle.

* *

The function circle draws a simple or filled circle with a given * center and radius.

* * @param img Image where the circle is drawn. * @param center Center of the circle. * @param radius Radius of the circle. * @param color Circle color. * * @see org.opencv.core.Core.circle */ public static void circle(Mat img, Point center, int radius, Scalar color) { circle_2(img.nativeObj, center.x, center.y, radius, color.val[0], color.val[1], color.val[2], color.val[3]); return; } // // C++: bool clipLine(Rect imgRect, Point& pt1, Point& pt2) // /** *

Clips the line against the image rectangle.

* *

The functions clipLine calculate a part of the line segment that * is entirely within the specified rectangle. * They return false if the line segment is completely outside the * rectangle. Otherwise, they return true.

* * @param imgRect Image rectangle. * @param pt1 First line point. * @param pt2 Second line point. * * @see org.opencv.core.Core.clipLine */ public static boolean clipLine(Rect imgRect, Point pt1, Point pt2) { double[] pt1_out = new double[2]; double[] pt2_out = new double[2]; boolean retVal = clipLine_0(imgRect.x, imgRect.y, imgRect.width, imgRect.height, pt1.x, pt1.y, pt1_out, pt2.x, pt2.y, pt2_out); if(pt1!=null){ pt1.x = pt1_out[0]; pt1.y = pt1_out[1]; } if(pt2!=null){ pt2.x = pt2_out[0]; pt2.y = pt2_out[1]; } return retVal; } // // C++: void compare(Mat src1, Mat src2, Mat& dst, int cmpop) // /** *

Performs the per-element comparison of two arrays or an array and scalar * value.

* *

The function compares:

*
    *
  • Elements of two arrays when src1 and src2 * have the same size: *
* *

dst(I) = src1(I) cmpop src2(I)

* *
    *
  • Elements of src1 with a scalar src2 when * src2 is constructed from Scalar or has a single * element: *
* *

dst(I) = src1(I) cmpop src2

* *
    *
  • src1 with elements of src2 when * src1 is constructed from Scalar or has a single * element: *
* *

dst(I) = src1 cmpop src2(I)

* *

When the comparison result is true, the corresponding element of output array * is set to 255.The comparison operations can be replaced with the equivalent * matrix expressions:

* *

// C++ code:

* *

Mat dst1 = src1 >= src2;

* *

Mat dst2 = src1 < 8;...

* * @param src1 first input array or a scalar (in the case of cvCmp, * cv.Cmp, cvCmpS, cv.CmpS it is always * an array); when it is an array, it must have a single channel. * @param src2 second input array or a scalar (in the case of cvCmp * and cv.Cmp it is always an array; in the case of * cvCmpS, cv.CmpS it is always a scalar); when it is * an array, it must have a single channel. * @param dst output array that has the same size and type as the input arrays. * @param cmpop a flag, that specifies correspondence between the arrays: *
    *
  • CMP_EQ src1 is equal to src2. *
  • CMP_GT src1 is greater than src2. *
  • CMP_GE src1 is greater than or equal to src2. *
  • CMP_LT src1 is less than src2. *
  • CMP_LE src1 is less than or equal to src2. *
  • CMP_NE src1 is unequal to src2. *
* * @see org.opencv.core.Core.compare * @see org.opencv.imgproc.Imgproc#threshold * @see org.opencv.core.Core#max * @see org.opencv.core.Core#checkRange * @see org.opencv.core.Core#min */ public static void compare(Mat src1, Mat src2, Mat dst, int cmpop) { compare_0(src1.nativeObj, src2.nativeObj, dst.nativeObj, cmpop); return; } // // C++: void compare(Mat src1, Scalar src2, Mat& dst, int cmpop) // /** *

Performs the per-element comparison of two arrays or an array and scalar * value.

* *

The function compares:

*
    *
  • Elements of two arrays when src1 and src2 * have the same size: *
* *

dst(I) = src1(I) cmpop src2(I)

* *
    *
  • Elements of src1 with a scalar src2 when * src2 is constructed from Scalar or has a single * element: *
* *

dst(I) = src1(I) cmpop src2

* *
    *
  • src1 with elements of src2 when * src1 is constructed from Scalar or has a single * element: *
* *

dst(I) = src1 cmpop src2(I)

* *

When the comparison result is true, the corresponding element of output array * is set to 255.The comparison operations can be replaced with the equivalent * matrix expressions:

* *

// C++ code:

* *

Mat dst1 = src1 >= src2;

* *

Mat dst2 = src1 < 8;...

* * @param src1 first input array or a scalar (in the case of cvCmp, * cv.Cmp, cvCmpS, cv.CmpS it is always * an array); when it is an array, it must have a single channel. * @param src2 second input array or a scalar (in the case of cvCmp * and cv.Cmp it is always an array; in the case of * cvCmpS, cv.CmpS it is always a scalar); when it is * an array, it must have a single channel. * @param dst output array that has the same size and type as the input arrays. * @param cmpop a flag, that specifies correspondence between the arrays: *
    *
  • CMP_EQ src1 is equal to src2. *
  • CMP_GT src1 is greater than src2. *
  • CMP_GE src1 is greater than or equal to src2. *
  • CMP_LT src1 is less than src2. *
  • CMP_LE src1 is less than or equal to src2. *
  • CMP_NE src1 is unequal to src2. *
* * @see org.opencv.core.Core.compare * @see org.opencv.imgproc.Imgproc#threshold * @see org.opencv.core.Core#max * @see org.opencv.core.Core#checkRange * @see org.opencv.core.Core#min */ public static void compare(Mat src1, Scalar src2, Mat dst, int cmpop) { compare_1(src1.nativeObj, src2.val[0], src2.val[1], src2.val[2], src2.val[3], dst.nativeObj, cmpop); return; } // // C++: void completeSymm(Mat& mtx, bool lowerToUpper = false) // /** *

Copies the lower or the upper half of a square matrix to another half.

* *

The function completeSymm copies the lower half of a square * matrix to its another half. The matrix diagonal remains unchanged:

*
    *
  • mtx_(ij)=mtx_(ji) for i > j if lowerToUpper=false *
  • mtx_(ij)=mtx_(ji) for i < j if lowerToUpper=true *
* * @param mtx input-output floating-point square matrix. * @param lowerToUpper operation flag; if true, the lower half is copied to the * upper half. Otherwise, the upper half is copied to the lower half. * * @see org.opencv.core.Core.completeSymm * @see org.opencv.core.Core#transpose * @see org.opencv.core.Core#flip */ public static void completeSymm(Mat mtx, boolean lowerToUpper) { completeSymm_0(mtx.nativeObj, lowerToUpper); return; } /** *

Copies the lower or the upper half of a square matrix to another half.

* *

The function completeSymm copies the lower half of a square * matrix to its another half. The matrix diagonal remains unchanged:

*
    *
  • mtx_(ij)=mtx_(ji) for i > j if lowerToUpper=false *
  • mtx_(ij)=mtx_(ji) for i < j if lowerToUpper=true *
* * @param mtx input-output floating-point square matrix. * * @see org.opencv.core.Core.completeSymm * @see org.opencv.core.Core#transpose * @see org.opencv.core.Core#flip */ public static void completeSymm(Mat mtx) { completeSymm_1(mtx.nativeObj); return; } // // C++: void convertScaleAbs(Mat src, Mat& dst, double alpha = 1, double beta = 0) // /** *

Scales, calculates absolute values, and converts the result to 8-bit.

* *

On each element of the input array, the function convertScaleAbs * performs three operations sequentially: scaling, taking an absolute value, * conversion to an unsigned 8-bit type:

* *

dst(I)= saturate_cast<uchar>(| src(I)* alpha + beta|)<BR>In case * of multi-channel arrays, the function processes each channel independently. * When the output is not 8-bit, the operation can be emulated by calling the * Mat.convertTo method(or by using matrix expressions) and then * by calculating an absolute value of the result. For example: * <BR><code>

* *

// C++ code:

* *

Mat_ A(30,30);

* *

randu(A, Scalar(-100), Scalar(100));

* *

Mat_ B = A*5 + 3;

* *

B = abs(B);

* *

// Mat_ B = abs(A*5+3) will also do the job,

* *

// but it will allocate a temporary matrix

* * @param src input array. * @param dst output array. * @param alpha optional scale factor. * @param beta optional delta added to the scaled values. * * @see org.opencv.core.Core.convertScaleAbs * @see org.opencv.core.Mat#convertTo */ public static void convertScaleAbs(Mat src, Mat dst, double alpha, double beta) { convertScaleAbs_0(src.nativeObj, dst.nativeObj, alpha, beta); return; } /** *

Scales, calculates absolute values, and converts the result to 8-bit.

* *

On each element of the input array, the function convertScaleAbs * performs three operations sequentially: scaling, taking an absolute value, * conversion to an unsigned 8-bit type:

* *

dst(I)= saturate_cast<uchar>(| src(I)* alpha + beta|)<BR>In case * of multi-channel arrays, the function processes each channel independently. * When the output is not 8-bit, the operation can be emulated by calling the * Mat.convertTo method(or by using matrix expressions) and then * by calculating an absolute value of the result. For example: * <BR><code>

* *

// C++ code:

* *

Mat_ A(30,30);

* *

randu(A, Scalar(-100), Scalar(100));

* *

Mat_ B = A*5 + 3;

* *

B = abs(B);

* *

// Mat_ B = abs(A*5+3) will also do the job,

* *

// but it will allocate a temporary matrix

* * @param src input array. * @param dst output array. * * @see org.opencv.core.Core.convertScaleAbs * @see org.opencv.core.Mat#convertTo */ public static void convertScaleAbs(Mat src, Mat dst) { convertScaleAbs_1(src.nativeObj, dst.nativeObj); return; } // // C++: int countNonZero(Mat src) // /** *

Counts non-zero array elements.

* *

The function returns the number of non-zero elements in src :

* *

sum(by: I: src(I) != 0) 1

* * @param src single-channel array. * * @see org.opencv.core.Core.countNonZero * @see org.opencv.core.Core#minMaxLoc * @see org.opencv.core.Core#calcCovarMatrix * @see org.opencv.core.Core#meanStdDev * @see org.opencv.core.Core#norm * @see org.opencv.core.Core#mean */ public static int countNonZero(Mat src) { int retVal = countNonZero_0(src.nativeObj); return retVal; } // // C++: float cubeRoot(float val) // /** *

Computes the cube root of an argument.

* *

The function cubeRoot computes sqrt3(val). Negative * arguments are handled correctly. NaN and Inf are not handled. The accuracy * approaches the maximum possible accuracy for single-precision data.

* * @param val A function argument. * * @see org.opencv.core.Core.cubeRoot */ public static float cubeRoot(float val) { float retVal = cubeRoot_0(val); return retVal; } // // C++: void dct(Mat src, Mat& dst, int flags = 0) // /** *

Performs a forward or inverse discrete Cosine transform of 1D or 2D array.

* *

The function dct performs a forward or inverse discrete Cosine * transform (DCT) of a 1D or 2D floating-point array:

*
    *
  • Forward Cosine transform of a 1D vector of N elements: *
* *

Y = C^N * X

* *

where

* *

C^N_(jk)= sqrt(alpha_j/N) cos((pi(2k+1)j)/(2N))

* *

and

* *

alpha_0=1, alpha_j=2 for *j > 0*.

*
    *
  • Inverse Cosine transform of a 1D vector of N elements: *
* *

X = (C^N)^(-1) * Y = (C^N)^T * Y

* *

(since C^N is an orthogonal matrix, C^N * (C^N)^T = I)

*
    *
  • Forward 2D Cosine transform of M x N matrix: *
* *

Y = C^N * X * (C^N)^T

* *
    *
  • Inverse 2D Cosine transform of M x N matrix: *
* *

X = (C^N)^T * X * C^N

* *

The function chooses the mode of operation by looking at the flags and size * of the input array:

*
    *
  • If (flags & DCT_INVERSE) == 0, the function does a * forward 1D or 2D transform. Otherwise, it is an inverse 1D or 2D transform. *
  • If (flags & DCT_ROWS) != 0, the function performs a 1D * transform of each row. *
  • If the array is a single column or a single row, the function performs * a 1D transform. *
  • If none of the above is true, the function performs a 2D transform. *
* *

Note:

* *

Currently dct supports even-size arrays (2, 4, 6...). For data * analysis and approximation, you can pad the array when necessary.

* *

Also, the function performance depends very much, and not monotonically, on * the array size (see"getOptimalDFTSize"). In the current implementation DCT of * a vector of size N is calculated via DFT of a vector of size * N/2. Thus, the optimal DCT size N1 >= N can be * calculated as:

* *

// C++ code:

* *

size_t getOptimalDCTSize(size_t N) { return 2*getOptimalDFTSize((N+1)/2); }

* *

N1 = getOptimalDCTSize(N);

* *

* * @param src input floating-point array. * @param dst output array of the same size and type as src. * @param flags transformation flags as a combination of the following values: *
    *
  • DCT_INVERSE performs an inverse 1D or 2D transform instead of the * default forward transform. *
  • DCT_ROWS performs a forward or inverse transform of every individual * row of the input matrix. This flag enables you to transform multiple vectors * simultaneously and can be used to decrease the overhead (which is sometimes * several times larger than the processing itself) to perform 3D and * higher-dimensional transforms and so forth. *
* * @see org.opencv.core.Core.dct * @see org.opencv.core.Core#dft * @see org.opencv.core.Core#idct * @see org.opencv.core.Core#getOptimalDFTSize */ public static void dct(Mat src, Mat dst, int flags) { dct_0(src.nativeObj, dst.nativeObj, flags); return; } /** *

Performs a forward or inverse discrete Cosine transform of 1D or 2D array.

* *

The function dct performs a forward or inverse discrete Cosine * transform (DCT) of a 1D or 2D floating-point array:

*
    *
  • Forward Cosine transform of a 1D vector of N elements: *
* *

Y = C^N * X

* *

where

* *

C^N_(jk)= sqrt(alpha_j/N) cos((pi(2k+1)j)/(2N))

* *

and

* *

alpha_0=1, alpha_j=2 for *j > 0*.

*
    *
  • Inverse Cosine transform of a 1D vector of N elements: *
* *

X = (C^N)^(-1) * Y = (C^N)^T * Y

* *

(since C^N is an orthogonal matrix, C^N * (C^N)^T = I)

*
    *
  • Forward 2D Cosine transform of M x N matrix: *
* *

Y = C^N * X * (C^N)^T

* *
    *
  • Inverse 2D Cosine transform of M x N matrix: *
* *

X = (C^N)^T * X * C^N

* *

The function chooses the mode of operation by looking at the flags and size * of the input array:

*
    *
  • If (flags & DCT_INVERSE) == 0, the function does a * forward 1D or 2D transform. Otherwise, it is an inverse 1D or 2D transform. *
  • If (flags & DCT_ROWS) != 0, the function performs a 1D * transform of each row. *
  • If the array is a single column or a single row, the function performs * a 1D transform. *
  • If none of the above is true, the function performs a 2D transform. *
* *

Note:

* *

Currently dct supports even-size arrays (2, 4, 6...). For data * analysis and approximation, you can pad the array when necessary.

* *

Also, the function performance depends very much, and not monotonically, on * the array size (see"getOptimalDFTSize"). In the current implementation DCT of * a vector of size N is calculated via DFT of a vector of size * N/2. Thus, the optimal DCT size N1 >= N can be * calculated as:

* *

// C++ code:

* *

size_t getOptimalDCTSize(size_t N) { return 2*getOptimalDFTSize((N+1)/2); }

* *

N1 = getOptimalDCTSize(N);

* *

* * @param src input floating-point array. * @param dst output array of the same size and type as src. * * @see org.opencv.core.Core.dct * @see org.opencv.core.Core#dft * @see org.opencv.core.Core#idct * @see org.opencv.core.Core#getOptimalDFTSize */ public static void dct(Mat src, Mat dst) { dct_1(src.nativeObj, dst.nativeObj); return; } // // C++: double determinant(Mat mtx) // /** *

Returns the determinant of a square floating-point matrix.

* *

The function determinant calculates and returns the determinant * of the specified matrix. For small matrices (mtx.cols=mtx.rows<=3), * the direct method is used. For larger matrices, the function uses LU * factorization with partial pivoting.

* *

For symmetric positively-determined matrices, it is also possible to use * "eigen" decomposition to calculate the determinant.

* * @param mtx input matrix that must have CV_32FC1 or * CV_64FC1 type and square size. * * @see org.opencv.core.Core.determinant * @see org.opencv.core.Core#invert * @see org.opencv.core.Core#solve * @see org.opencv.core.Core#eigen * @see org.opencv.core.Core#trace */ public static double determinant(Mat mtx) { double retVal = determinant_0(mtx.nativeObj); return retVal; } // // C++: void dft(Mat src, Mat& dst, int flags = 0, int nonzeroRows = 0) // /** *

Performs a forward or inverse Discrete Fourier transform of a 1D or 2D * floating-point array.

* *

The function performs one of the following:

*
    *
  • Forward the Fourier transform of a 1D vector of N * elements: *
* *

Y = F^N * X,

* *

where F^N_(jk)=exp(-2pi i j k/N) and i=sqrt(-1)

*
    *
  • Inverse the Fourier transform of a 1D vector of N * elements: *
* *

X'= (F^N)^(-1) * Y = (F^N)^* * y * X = (1/N) * X,

* *

where F^*=(Re(F^N)-Im(F^N))^T

*
    *
  • Forward the 2D Fourier transform of a M x N matrix: *
* *

Y = F^M * X * F^N

* *
    *
  • Inverse the 2D Fourier transform of a M x N matrix: *
* *

X'= (F^M)^* * Y * (F^N)^* * X = 1/(M * N) * X'

* *

In case of real (single-channel) data, the output spectrum of the forward * Fourier transform or input spectrum of the inverse Fourier transform can be * represented in a packed format called *CCS* (complex-conjugate-symmetrical). * It was borrowed from IPL (Intel* Image Processing Library). Here is how 2D * *CCS* spectrum looks:

* *

Re Y_(0,0) Re Y_(0,1) Im Y_(0,1) Re Y_(0,2) Im Y_(0,2) *s Re Y_(0,N/2-1) * Im Y_(0,N/2-1) Re Y_(0,N/2) * Re Y_(1,0) Re Y_(1,1) Im Y_(1,1) Re Y_(1,2) Im Y_(1,2) *s Re Y_(1,N/2-1) Im * Y_(1,N/2-1) Re Y_(1,N/2) * Im Y_(1,0) Re Y_(2,1) Im Y_(2,1) Re Y_(2,2) Im Y_(2,2) *s Re Y_(2,N/2-1) Im * Y_(2,N/2-1) Im Y_(1,N/2)........................... * Re Y_(M/2-1,0) Re Y_(M-3,1) Im Y_(M-3,1)......... Re Y_(M-3,N/2-1) Im * Y_(M-3,N/2-1) Re Y_(M/2-1,N/2) * Im Y_(M/2-1,0) Re Y_(M-2,1) Im Y_(M-2,1)......... Re Y_(M-2,N/2-1) Im * Y_(M-2,N/2-1) Im Y_(M/2-1,N/2) * Re Y_(M/2,0) Re Y_(M-1,1) Im Y_(M-1,1)......... Re Y_(M-1,N/2-1) Im * Y_(M-1,N/2-1) Re Y_(M/2,N/2)

* *

In case of 1D transform of a real vector, the output looks like the first row * of the matrix above.

* *

So, the function chooses an operation mode depending on the flags and size of * the input array:

*
    *
  • If DFT_ROWS is set or the input array has a single row or * single column, the function performs a 1D forward or inverse transform of * each row of a matrix when DFT_ROWS is set. Otherwise, it * performs a 2D transform. *
  • If the input array is real and DFT_INVERSE is not set, * the function performs a forward 1D or 2D transform: *
  • When DFT_COMPLEX_OUTPUT is set, the output is a complex * matrix of the same size as input. *
  • When DFT_COMPLEX_OUTPUT is not set, the output is a real * matrix of the same size as input. In case of 2D transform, it uses the packed * format as shown above. In case of a single 1D transform, it looks like the * first row of the matrix above. In case of multiple 1D transforms (when using * the DFT_ROWS flag), each row of the output matrix looks like the * first row of the matrix above. *
  • If the input array is complex and either DFT_INVERSE or * DFT_REAL_OUTPUT are not set, the output is a complex array of * the same size as input. The function performs a forward or inverse 1D or 2D * transform of the whole input array or each row of the input array * independently, depending on the flags DFT_INVERSE and * DFT_ROWS. *
  • When DFT_INVERSE is set and the input array is real, or * it is complex but DFT_REAL_OUTPUT is set, the output is a real * array of the same size as input. The function performs a 1D or 2D inverse * transformation of the whole input array or each individual row, depending on * the flags DFT_INVERSE and DFT_ROWS. *
* *

If DFT_SCALE is set, the scaling is done after the * transformation.

* *

Unlike "dct", the function supports arrays of arbitrary size. But only those * arrays are processed efficiently, whose sizes can be factorized in a product * of small prime numbers (2, 3, and 5 in the current implementation). Such an * efficient DFT size can be calculated using the "getOptimalDFTSize" method. * The sample below illustrates how to calculate a DFT-based convolution of two * 2D real arrays:

* *

// C++ code:

* *

void convolveDFT(InputArray A, InputArray B, OutputArray C)

* * *

// reallocate the output array if needed

* *

C.create(abs(A.rows - B.rows)+1, abs(A.cols - B.cols)+1, A.type());

* *

Size dftSize;

* *

// calculate the size of DFT transform

* *

dftSize.width = getOptimalDFTSize(A.cols + B.cols - 1);

* *

dftSize.height = getOptimalDFTSize(A.rows + B.rows - 1);

* *

// allocate temporary buffers and initialize them with 0's

* *

Mat tempA(dftSize, A.type(), Scalar.all(0));

* *

Mat tempB(dftSize, B.type(), Scalar.all(0));

* *

// copy A and B to the top-left corners of tempA and tempB, respectively

* *

Mat roiA(tempA, Rect(0,0,A.cols,A.rows));

* *

A.copyTo(roiA);

* *

Mat roiB(tempB, Rect(0,0,B.cols,B.rows));

* *

B.copyTo(roiB);

* *

// now transform the padded A & B in-place;

* *

// use "nonzeroRows" hint for faster processing

* *

dft(tempA, tempA, 0, A.rows);

* *

dft(tempB, tempB, 0, B.rows);

* *

// multiply the spectrums;

* *

// the function handles packed spectrum representations well

* *

mulSpectrums(tempA, tempB, tempA);

* *

// transform the product back from the frequency domain.

* *

// Even though all the result rows will be non-zero,

* *

// you need only the first C.rows of them, and thus you

* *

// pass nonzeroRows == C.rows

* *

dft(tempA, tempA, DFT_INVERSE + DFT_SCALE, C.rows);

* *

// now copy the result back to C.

* *

tempA(Rect(0, 0, C.cols, C.rows)).copyTo(C);

* *

// all the temporary buffers will be deallocated automatically

* * *

To optimize this sample, consider the following approaches:

*
    *
  • Since nonzeroRows != 0 is passed to the forward transform * calls and since A and B are copied to the top-left * corners of tempA and tempB, respectively, it is not * necessary to clear the whole tempA and tempB. It is * only necessary to clear the tempA.cols - A.cols * (tempB.cols - B.cols) rightmost columns of the matrices. *
  • This DFT-based convolution does not have to be applied to the whole * big arrays, especially if B is significantly smaller than * A or vice versa. Instead, you can calculate convolution by * parts. To do this, you need to split the output array C into * multiple tiles. For each tile, estimate which parts of A and * B are required to calculate convolution in this tile. If the * tiles in C are too small, the speed will decrease a lot because * of repeated work. In the ultimate case, when each tile in C is a * single pixel, the algorithm becomes equivalent to the naive convolution * algorithm. If the tiles are too big, the temporary arrays tempA * and tempB become too big and there is also a slowdown because of * bad cache locality. So, there is an optimal tile size somewhere in the * middle. *
  • If different tiles in C can be calculated in parallel * and, thus, the convolution is done by parts, the loop can be threaded. *
* *

All of the above improvements have been implemented in "matchTemplate" and * "filter2D". Therefore, by using them, you can get the performance even better * than with the above theoretically optimal implementation. Though, those two * functions actually calculate cross-correlation, not convolution, so you need * to "flip" the second convolution operand B vertically and * horizontally using "flip".

* *

Note:

*
    *
  • An example using the discrete fourier transform can be found at * opencv_source_code/samples/cpp/dft.cpp *
  • (Python) An example using the dft functionality to perform Wiener * deconvolution can be found at opencv_source/samples/python2/deconvolution.py *
  • (Python) An example rearranging the quadrants of a Fourier image can * be found at opencv_source/samples/python2/dft.py *
* * @param src input array that could be real or complex. * @param dst output array whose size and type depends on the flags. * @param flags transformation flags, representing a combination of the * following values: *
    *
  • DFT_INVERSE performs an inverse 1D or 2D transform instead of the * default forward transform. *
  • DFT_SCALE scales the result: divide it by the number of array * elements. Normally, it is combined with DFT_INVERSE. *
  • DFT_ROWS performs a forward or inverse transform of every individual * row of the input matrix; this flag enables you to transform multiple vectors * simultaneously and can be used to decrease the overhead (which is sometimes * several times larger than the processing itself) to perform 3D and * higher-dimensional transformations and so forth. *
  • DFT_COMPLEX_OUTPUT performs a forward transformation of 1D or 2D real * array; the result, though being a complex array, has complex-conjugate * symmetry (*CCS*, see the function description below for details), and such an * array can be packed into a real array of the same size as input, which is the * fastest option and which is what the function does by default; however, you * may wish to get a full complex array (for simpler spectrum analysis, and so * on) - pass the flag to enable the function to produce a full-size complex * output array. *
  • DFT_REAL_OUTPUT performs an inverse transformation of a 1D or 2D * complex array; the result is normally a complex array of the same size, * however, if the input array has conjugate-complex symmetry (for example, it * is a result of forward transformation with DFT_COMPLEX_OUTPUT * flag), the output is a real array; while the function itself does not check * whether the input is symmetrical or not, you can pass the flag and then the * function will assume the symmetry and produce the real output array (note * that when the input is packed into a real array and inverse transformation is * executed, the function treats the input as a packed complex-conjugate * symmetrical array, and the output will also be a real array). *
* @param nonzeroRows when the parameter is not zero, the function assumes that * only the first nonzeroRows rows of the input array * (DFT_INVERSE is not set) or only the first nonzeroRows * of the output array (DFT_INVERSE is set) contain non-zeros, * thus, the function can handle the rest of the rows more efficiently and save * some time; this technique is very useful for calculating array * cross-correlation or convolution using DFT. * * @see org.opencv.core.Core.dft * @see org.opencv.imgproc.Imgproc#matchTemplate * @see org.opencv.core.Core#mulSpectrums * @see org.opencv.core.Core#cartToPolar * @see org.opencv.core.Core#flip * @see org.opencv.core.Core#magnitude * @see org.opencv.core.Core#phase * @see org.opencv.core.Core#dct * @see org.opencv.imgproc.Imgproc#filter2D * @see org.opencv.core.Core#getOptimalDFTSize */ public static void dft(Mat src, Mat dst, int flags, int nonzeroRows) { dft_0(src.nativeObj, dst.nativeObj, flags, nonzeroRows); return; } /** *

Performs a forward or inverse Discrete Fourier transform of a 1D or 2D * floating-point array.

* *

The function performs one of the following:

*
    *
  • Forward the Fourier transform of a 1D vector of N * elements: *
* *

Y = F^N * X,

* *

where F^N_(jk)=exp(-2pi i j k/N) and i=sqrt(-1)

*
    *
  • Inverse the Fourier transform of a 1D vector of N * elements: *
* *

X'= (F^N)^(-1) * Y = (F^N)^* * y * X = (1/N) * X,

* *

where F^*=(Re(F^N)-Im(F^N))^T

*
    *
  • Forward the 2D Fourier transform of a M x N matrix: *
* *

Y = F^M * X * F^N

* *
    *
  • Inverse the 2D Fourier transform of a M x N matrix: *
* *

X'= (F^M)^* * Y * (F^N)^* * X = 1/(M * N) * X'

* *

In case of real (single-channel) data, the output spectrum of the forward * Fourier transform or input spectrum of the inverse Fourier transform can be * represented in a packed format called *CCS* (complex-conjugate-symmetrical). * It was borrowed from IPL (Intel* Image Processing Library). Here is how 2D * *CCS* spectrum looks:

* *

Re Y_(0,0) Re Y_(0,1) Im Y_(0,1) Re Y_(0,2) Im Y_(0,2) *s Re Y_(0,N/2-1) * Im Y_(0,N/2-1) Re Y_(0,N/2) * Re Y_(1,0) Re Y_(1,1) Im Y_(1,1) Re Y_(1,2) Im Y_(1,2) *s Re Y_(1,N/2-1) Im * Y_(1,N/2-1) Re Y_(1,N/2) * Im Y_(1,0) Re Y_(2,1) Im Y_(2,1) Re Y_(2,2) Im Y_(2,2) *s Re Y_(2,N/2-1) Im * Y_(2,N/2-1) Im Y_(1,N/2)........................... * Re Y_(M/2-1,0) Re Y_(M-3,1) Im Y_(M-3,1)......... Re Y_(M-3,N/2-1) Im * Y_(M-3,N/2-1) Re Y_(M/2-1,N/2) * Im Y_(M/2-1,0) Re Y_(M-2,1) Im Y_(M-2,1)......... Re Y_(M-2,N/2-1) Im * Y_(M-2,N/2-1) Im Y_(M/2-1,N/2) * Re Y_(M/2,0) Re Y_(M-1,1) Im Y_(M-1,1)......... Re Y_(M-1,N/2-1) Im * Y_(M-1,N/2-1) Re Y_(M/2,N/2)

* *

In case of 1D transform of a real vector, the output looks like the first row * of the matrix above.

* *

So, the function chooses an operation mode depending on the flags and size of * the input array:

*
    *
  • If DFT_ROWS is set or the input array has a single row or * single column, the function performs a 1D forward or inverse transform of * each row of a matrix when DFT_ROWS is set. Otherwise, it * performs a 2D transform. *
  • If the input array is real and DFT_INVERSE is not set, * the function performs a forward 1D or 2D transform: *
  • When DFT_COMPLEX_OUTPUT is set, the output is a complex * matrix of the same size as input. *
  • When DFT_COMPLEX_OUTPUT is not set, the output is a real * matrix of the same size as input. In case of 2D transform, it uses the packed * format as shown above. In case of a single 1D transform, it looks like the * first row of the matrix above. In case of multiple 1D transforms (when using * the DFT_ROWS flag), each row of the output matrix looks like the * first row of the matrix above. *
  • If the input array is complex and either DFT_INVERSE or * DFT_REAL_OUTPUT are not set, the output is a complex array of * the same size as input. The function performs a forward or inverse 1D or 2D * transform of the whole input array or each row of the input array * independently, depending on the flags DFT_INVERSE and * DFT_ROWS. *
  • When DFT_INVERSE is set and the input array is real, or * it is complex but DFT_REAL_OUTPUT is set, the output is a real * array of the same size as input. The function performs a 1D or 2D inverse * transformation of the whole input array or each individual row, depending on * the flags DFT_INVERSE and DFT_ROWS. *
* *

If DFT_SCALE is set, the scaling is done after the * transformation.

* *

Unlike "dct", the function supports arrays of arbitrary size. But only those * arrays are processed efficiently, whose sizes can be factorized in a product * of small prime numbers (2, 3, and 5 in the current implementation). Such an * efficient DFT size can be calculated using the "getOptimalDFTSize" method. * The sample below illustrates how to calculate a DFT-based convolution of two * 2D real arrays:

* *

// C++ code:

* *

void convolveDFT(InputArray A, InputArray B, OutputArray C)

* * *

// reallocate the output array if needed

* *

C.create(abs(A.rows - B.rows)+1, abs(A.cols - B.cols)+1, A.type());

* *

Size dftSize;

* *

// calculate the size of DFT transform

* *

dftSize.width = getOptimalDFTSize(A.cols + B.cols - 1);

* *

dftSize.height = getOptimalDFTSize(A.rows + B.rows - 1);

* *

// allocate temporary buffers and initialize them with 0's

* *

Mat tempA(dftSize, A.type(), Scalar.all(0));

* *

Mat tempB(dftSize, B.type(), Scalar.all(0));

* *

// copy A and B to the top-left corners of tempA and tempB, respectively

* *

Mat roiA(tempA, Rect(0,0,A.cols,A.rows));

* *

A.copyTo(roiA);

* *

Mat roiB(tempB, Rect(0,0,B.cols,B.rows));

* *

B.copyTo(roiB);

* *

// now transform the padded A & B in-place;

* *

// use "nonzeroRows" hint for faster processing

* *

dft(tempA, tempA, 0, A.rows);

* *

dft(tempB, tempB, 0, B.rows);

* *

// multiply the spectrums;

* *

// the function handles packed spectrum representations well

* *

mulSpectrums(tempA, tempB, tempA);

* *

// transform the product back from the frequency domain.

* *

// Even though all the result rows will be non-zero,

* *

// you need only the first C.rows of them, and thus you

* *

// pass nonzeroRows == C.rows

* *

dft(tempA, tempA, DFT_INVERSE + DFT_SCALE, C.rows);

* *

// now copy the result back to C.

* *

tempA(Rect(0, 0, C.cols, C.rows)).copyTo(C);

* *

// all the temporary buffers will be deallocated automatically

* * *

To optimize this sample, consider the following approaches:

*
    *
  • Since nonzeroRows != 0 is passed to the forward transform * calls and since A and B are copied to the top-left * corners of tempA and tempB, respectively, it is not * necessary to clear the whole tempA and tempB. It is * only necessary to clear the tempA.cols - A.cols * (tempB.cols - B.cols) rightmost columns of the matrices. *
  • This DFT-based convolution does not have to be applied to the whole * big arrays, especially if B is significantly smaller than * A or vice versa. Instead, you can calculate convolution by * parts. To do this, you need to split the output array C into * multiple tiles. For each tile, estimate which parts of A and * B are required to calculate convolution in this tile. If the * tiles in C are too small, the speed will decrease a lot because * of repeated work. In the ultimate case, when each tile in C is a * single pixel, the algorithm becomes equivalent to the naive convolution * algorithm. If the tiles are too big, the temporary arrays tempA * and tempB become too big and there is also a slowdown because of * bad cache locality. So, there is an optimal tile size somewhere in the * middle. *
  • If different tiles in C can be calculated in parallel * and, thus, the convolution is done by parts, the loop can be threaded. *
* *

All of the above improvements have been implemented in "matchTemplate" and * "filter2D". Therefore, by using them, you can get the performance even better * than with the above theoretically optimal implementation. Though, those two * functions actually calculate cross-correlation, not convolution, so you need * to "flip" the second convolution operand B vertically and * horizontally using "flip".

* *

Note:

*
    *
  • An example using the discrete fourier transform can be found at * opencv_source_code/samples/cpp/dft.cpp *
  • (Python) An example using the dft functionality to perform Wiener * deconvolution can be found at opencv_source/samples/python2/deconvolution.py *
  • (Python) An example rearranging the quadrants of a Fourier image can * be found at opencv_source/samples/python2/dft.py *
* * @param src input array that could be real or complex. * @param dst output array whose size and type depends on the flags. * * @see org.opencv.core.Core.dft * @see org.opencv.imgproc.Imgproc#matchTemplate * @see org.opencv.core.Core#mulSpectrums * @see org.opencv.core.Core#cartToPolar * @see org.opencv.core.Core#flip * @see org.opencv.core.Core#magnitude * @see org.opencv.core.Core#phase * @see org.opencv.core.Core#dct * @see org.opencv.imgproc.Imgproc#filter2D * @see org.opencv.core.Core#getOptimalDFTSize */ public static void dft(Mat src, Mat dst) { dft_1(src.nativeObj, dst.nativeObj); return; } // // C++: void divide(Mat src1, Mat src2, Mat& dst, double scale = 1, int dtype = -1) // /** *

Performs per-element division of two arrays or a scalar by an array.

* *

The functions divide divide one array by another:

* *

dst(I) = saturate(src1(I)*scale/src2(I))

* *

or a scalar by an array when there is no src1 :

* *

dst(I) = saturate(scale/src2(I))

* *

When src2(I) is zero, dst(I) will also be zero. * Different channels of multi-channel arrays are processed independently.

* *

Note: Saturation is not applied when the output array has the depth * CV_32S. You may even get result of an incorrect sign in the case * of overflow.

* * @param src1 first input array. * @param src2 second input array of the same size and type as src1. * @param dst output array of the same size and type as src2. * @param scale scalar factor. * @param dtype optional depth of the output array; if -1, * dst will have depth src2.depth(), but in case of an * array-by-array division, you can only pass -1 when * src1.depth()==src2.depth(). * * @see org.opencv.core.Core.divide * @see org.opencv.core.Core#multiply * @see org.opencv.core.Core#add * @see org.opencv.core.Core#subtract */ public static void divide(Mat src1, Mat src2, Mat dst, double scale, int dtype) { divide_0(src1.nativeObj, src2.nativeObj, dst.nativeObj, scale, dtype); return; } /** *

Performs per-element division of two arrays or a scalar by an array.

* *

The functions divide divide one array by another:

* *

dst(I) = saturate(src1(I)*scale/src2(I))

* *

or a scalar by an array when there is no src1 :

* *

dst(I) = saturate(scale/src2(I))

* *

When src2(I) is zero, dst(I) will also be zero. * Different channels of multi-channel arrays are processed independently.

* *

Note: Saturation is not applied when the output array has the depth * CV_32S. You may even get result of an incorrect sign in the case * of overflow.

* * @param src1 first input array. * @param src2 second input array of the same size and type as src1. * @param dst output array of the same size and type as src2. * @param scale scalar factor. * * @see org.opencv.core.Core.divide * @see org.opencv.core.Core#multiply * @see org.opencv.core.Core#add * @see org.opencv.core.Core#subtract */ public static void divide(Mat src1, Mat src2, Mat dst, double scale) { divide_1(src1.nativeObj, src2.nativeObj, dst.nativeObj, scale); return; } /** *

Performs per-element division of two arrays or a scalar by an array.

* *

The functions divide divide one array by another:

* *

dst(I) = saturate(src1(I)*scale/src2(I))

* *

or a scalar by an array when there is no src1 :

* *

dst(I) = saturate(scale/src2(I))

* *

When src2(I) is zero, dst(I) will also be zero. * Different channels of multi-channel arrays are processed independently.

* *

Note: Saturation is not applied when the output array has the depth * CV_32S. You may even get result of an incorrect sign in the case * of overflow.

* * @param src1 first input array. * @param src2 second input array of the same size and type as src1. * @param dst output array of the same size and type as src2. * * @see org.opencv.core.Core.divide * @see org.opencv.core.Core#multiply * @see org.opencv.core.Core#add * @see org.opencv.core.Core#subtract */ public static void divide(Mat src1, Mat src2, Mat dst) { divide_2(src1.nativeObj, src2.nativeObj, dst.nativeObj); return; } // // C++: void divide(double scale, Mat src2, Mat& dst, int dtype = -1) // /** *

Performs per-element division of two arrays or a scalar by an array.

* *

The functions divide divide one array by another:

* *

dst(I) = saturate(src1(I)*scale/src2(I))

* *

or a scalar by an array when there is no src1 :

* *

dst(I) = saturate(scale/src2(I))

* *

When src2(I) is zero, dst(I) will also be zero. * Different channels of multi-channel arrays are processed independently.

* *

Note: Saturation is not applied when the output array has the depth * CV_32S. You may even get result of an incorrect sign in the case * of overflow.

* * @param scale scalar factor. * @param src2 second input array of the same size and type as src1. * @param dst output array of the same size and type as src2. * @param dtype optional depth of the output array; if -1, * dst will have depth src2.depth(), but in case of an * array-by-array division, you can only pass -1 when * src1.depth()==src2.depth(). * * @see org.opencv.core.Core.divide * @see org.opencv.core.Core#multiply * @see org.opencv.core.Core#add * @see org.opencv.core.Core#subtract */ public static void divide(double scale, Mat src2, Mat dst, int dtype) { divide_3(scale, src2.nativeObj, dst.nativeObj, dtype); return; } /** *
* Performs per-element division of a scalar by an array:
* dst(I) = saturate(scale / src2(I)).
*
* <p>When src2(I) is zero, dst(I) will also be zero. Different channels of
* multi-channel arrays are processed independently.</p>
*
* <p>Note: saturation is not applied when the output array has depth CV_32S;
* you may even get a result of an incorrect sign in the case of overflow.</p>
*
* @param scale scalar factor (the dividend).
* @param src2 input array (the divisor).
* @param dst output array of the same size and type as {@code src2}.
*
* @see org.opencv.core.Core#multiply
* @see org.opencv.core.Core#add
* @see org.opencv.core.Core#subtract
*/
public static void divide(double scale, Mat src2, Mat dst)
{
    // dtype defaults to -1 (output keeps src2's depth) on the native side.
    divide_4(scale, src2.nativeObj, dst.nativeObj);
    return;
}


//
// C++: void divide(Mat src1, Scalar src2, Mat& dst, double scale = 1, int dtype = -1)
//

/**
* Performs per-element division of an array by a scalar:
* dst(I) = saturate(src1(I) * scale / src2).
*
* <p>When the divisor is zero, dst(I) will also be zero. Different channels
* of multi-channel arrays are processed independently.</p>
*
* <p>Note: saturation is not applied when the output array has depth CV_32S;
* you may even get a result of an incorrect sign in the case of overflow.</p>
*
* @param src1 first input array.
* @param src2 scalar divisor (expanded to all four channel slots).
* @param dst output array of the same size and type as {@code src1}.
* @param scale scalar factor.
* @param dtype optional depth of the output array; if -1, {@code dst} keeps
*        the input depth.
*
* @see org.opencv.core.Core#multiply
* @see org.opencv.core.Core#add
* @see org.opencv.core.Core#subtract
*/
public static void divide(Mat src1, Scalar src2, Mat dst, double scale, int dtype)
{
    divide_5(src1.nativeObj, src2.val[0], src2.val[1], src2.val[2], src2.val[3], dst.nativeObj, scale, dtype);
    return;
}

/**
* Performs per-element division of an array by a scalar:
* dst(I) = saturate(src1(I) * scale / src2).
*
* <p>When the divisor is zero, dst(I) will also be zero. Different channels
* of multi-channel arrays are processed independently.</p>
*
* <p>Note: saturation is not applied when the output array has depth CV_32S;
* you may even get a result of an incorrect sign in the case of overflow.</p>
*
* @param src1 first input array.
* @param src2 scalar divisor (expanded to all four channel slots).
* @param dst output array of the same size and type as {@code src1}.
* @param scale scalar factor.
*
* @see org.opencv.core.Core#multiply
* @see org.opencv.core.Core#add
* @see org.opencv.core.Core#subtract
*/
public static void divide(Mat src1, Scalar src2, Mat dst, double scale)
{
    // dtype defaults to -1 (output keeps the input depth) on the native side.
    divide_6(src1.nativeObj, src2.val[0], src2.val[1], src2.val[2], src2.val[3], dst.nativeObj, scale);
    return;
}

/**
* Performs per-element division of an array by a scalar:
* dst(I) = saturate(src1(I) / src2).
*
* <p>When the divisor is zero, dst(I) will also be zero. Different channels
* of multi-channel arrays are processed independently.</p>
*
* <p>Note: saturation is not applied when the output array has depth CV_32S;
* you may even get a result of an incorrect sign in the case of overflow.</p>
*
* @param src1 first input array.
* @param src2 scalar divisor (expanded to all four channel slots).
* @param dst output array of the same size and type as {@code src1}.
*
* @see org.opencv.core.Core#multiply
* @see org.opencv.core.Core#add
* @see org.opencv.core.Core#subtract
*/
public static void divide(Mat src1, Scalar src2, Mat dst)
{
    // scale defaults to 1 and dtype to -1 on the native side.
    divide_7(src1.nativeObj, src2.val[0], src2.val[1], src2.val[2], src2.val[3], dst.nativeObj);
    return;
}


//
// C++: bool eigen(Mat src, bool computeEigenvectors, Mat& eigenvalues, Mat& eigenvectors)
//

/**
* Calculates eigenvalues and, optionally, eigenvectors of a symmetric matrix:
*
* <pre>
* // C++ code:
* src * eigenvectors.row(i).t() = eigenvalues.at(i) * eigenvectors.row(i).t()
* </pre>
*
* <p>Note: the new and the old OpenCV interfaces use a different ordering of
* the eigenvalues/eigenvectors parameters.</p>
*
* @param src input matrix that must have CV_32FC1 or CV_64FC1 type, square
*        size and be symmetrical (src^T == src).
* @param computeEigenvectors whether eigenvectors should be computed as well.
* @param eigenvalues output vector of eigenvalues of the same type as
*        {@code src}; the eigenvalues are stored in descending order.
* @param eigenvectors output matrix of eigenvectors; it has the same size and
*        type as {@code src}; the eigenvectors are stored as subsequent
*        matrix rows, in the same order as the corresponding eigenvalues.
* @return true if the computation succeeded.
*
* @see org.opencv.core.Core#completeSymm
*/
public static boolean eigen(Mat src, boolean computeEigenvectors, Mat eigenvalues, Mat eigenvectors)
{
    boolean retVal = eigen_0(src.nativeObj, computeEigenvectors, eigenvalues.nativeObj, eigenvectors.nativeObj);
    return retVal;
}


//
// C++: void ellipse(Mat& img, Point center, Size axes, double angle, double startAngle, double endAngle, Scalar color, int thickness = 1, int lineType = 8, int shift = 0)
//

/**
* Draws a simple or thick elliptic arc or fills an ellipse sector.
*
* <p>A piecewise-linear curve is used to approximate the elliptic arc
* boundary. For more control, retrieve the curve with "ellipse2Poly" and
* render it with "polylines" or fill it with "fillPoly". To draw the whole
* ellipse rather than an arc, pass startAngle=0 and endAngle=360.</p>
*
* @param img Image.
* @param center Center of the ellipse.
* @param axes Half of the size of the ellipse main axes.
* @param angle Ellipse rotation angle in degrees.
* @param startAngle Starting angle of the elliptic arc in degrees.
* @param endAngle Ending angle of the elliptic arc in degrees.
* @param color Ellipse color.
* @param thickness Thickness of the ellipse arc outline, if positive;
*        otherwise a filled ellipse sector is drawn.
* @param lineType Type of the ellipse boundary. See the "line" description.
* @param shift Number of fractional bits in the coordinates of the center
*        and values of axes.
*/
public static void ellipse(Mat img, Point center, Size axes, double angle, double startAngle, double endAngle, Scalar color, int thickness, int lineType, int shift)
{
    ellipse_0(img.nativeObj, center.x, center.y, axes.width, axes.height, angle, startAngle, endAngle, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType, shift);
    return;
}

/**
* Draws a simple or thick elliptic arc or fills an ellipse sector.
*
* <p>To draw the whole ellipse rather than an arc, pass startAngle=0 and
* endAngle=360. Line type defaults to 8-connected and shift to 0.</p>
*
* @param img Image.
* @param center Center of the ellipse.
* @param axes Half of the size of the ellipse main axes.
* @param angle Ellipse rotation angle in degrees.
* @param startAngle Starting angle of the elliptic arc in degrees.
* @param endAngle Ending angle of the elliptic arc in degrees.
* @param color Ellipse color.
* @param thickness Thickness of the ellipse arc outline, if positive;
*        otherwise a filled ellipse sector is drawn.
*/
public static void ellipse(Mat img, Point center, Size axes, double angle, double startAngle, double endAngle, Scalar color, int thickness)
{
    ellipse_1(img.nativeObj, center.x, center.y, axes.width, axes.height, angle, startAngle, endAngle, color.val[0], color.val[1], color.val[2], color.val[3], thickness);
    return;
}

/**
* Draws a simple or thick elliptic arc or fills an ellipse sector.
*
* <p>To draw the whole ellipse rather than an arc, pass startAngle=0 and
* endAngle=360. Thickness defaults to 1, line type to 8-connected and
* shift to 0.</p>
*
* @param img Image.
* @param center Center of the ellipse.
* @param axes Half of the size of the ellipse main axes.
* @param angle Ellipse rotation angle in degrees.
* @param startAngle Starting angle of the elliptic arc in degrees.
* @param endAngle Ending angle of the elliptic arc in degrees.
* @param color Ellipse color.
*/
public static void ellipse(Mat img, Point center, Size axes, double angle, double startAngle, double endAngle, Scalar color)
{
    ellipse_2(img.nativeObj, center.x, center.y, axes.width, axes.height, angle, startAngle, endAngle, color.val[0], color.val[1], color.val[2], color.val[3]);
    return;
}


//
// C++: void ellipse(Mat& img, RotatedRect box, Scalar color, int thickness = 1, int lineType = 8)
//

/**
* Draws a simple or thick ellipse outline, or fills an ellipse, inscribed in
* a rotated rectangle.
*
* @param img Image.
* @param box Alternative ellipse representation via "RotatedRect" or
*        CvBox2D: the function draws an ellipse inscribed in the rotated
*        rectangle.
* @param color Ellipse color.
* @param thickness Thickness of the ellipse arc outline, if positive;
*        otherwise a filled ellipse sector is drawn.
* @param lineType Type of the ellipse boundary. See the "line" description.
*/
public static void ellipse(Mat img, RotatedRect box, Scalar color, int thickness, int lineType)
{
    ellipse_3(img.nativeObj, box.center.x, box.center.y, box.size.width, box.size.height, box.angle, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType);
    return;
}

/**
* Draws a simple or thick ellipse outline, or fills an ellipse, inscribed in
* a rotated rectangle. Line type defaults to 8-connected.
*
* @param img Image.
* @param box Alternative ellipse representation via "RotatedRect" or
*        CvBox2D: the function draws an ellipse inscribed in the rotated
*        rectangle.
* @param color Ellipse color.
* @param thickness Thickness of the ellipse arc outline, if positive;
*        otherwise a filled ellipse sector is drawn.
*/
public static void ellipse(Mat img, RotatedRect box, Scalar color, int thickness)
{
    ellipse_4(img.nativeObj, box.center.x, box.center.y, box.size.width, box.size.height, box.angle, color.val[0], color.val[1], color.val[2], color.val[3], thickness);
    return;
}

/**
* Draws a simple or thick ellipse outline, or fills an ellipse, inscribed in
* a rotated rectangle. Thickness defaults to 1 and line type to 8-connected.
*
* @param img Image.
* @param box Alternative ellipse representation via "RotatedRect" or
*        CvBox2D: the function draws an ellipse inscribed in the rotated
*        rectangle.
* @param color Ellipse color.
*/
public static void ellipse(Mat img, RotatedRect box, Scalar color)
{
    ellipse_5(img.nativeObj, box.center.x, box.center.y, box.size.width, box.size.height, box.angle, color.val[0], color.val[1], color.val[2], color.val[3]);
    return;
}


//
// C++: void ellipse2Poly(Point center, Size axes, int angle, int arcStart, int arcEnd, int delta, vector_Point& pts)
//

/**
* Approximates an elliptic arc with a polyline.
*
* <p>The function computes the vertices of a polyline that approximates the
* specified elliptic arc. It is used by "ellipse".</p>
*
* @param center Center of the arc.
* @param axes Half of the size of the ellipse main axes. See "ellipse" for
*        details.
* @param angle Rotation angle of the ellipse in degrees. See "ellipse" for
*        details.
* @param arcStart Starting angle of the elliptic arc in degrees.
* @param arcEnd Ending angle of the elliptic arc in degrees.
* @param delta Angle between the subsequent polyline vertices; it defines
*        the approximation accuracy.
* @param pts Output vector of polyline vertices.
*/
public static void ellipse2Poly(Point center, Size axes, int angle, int arcStart, int arcEnd, int delta, MatOfPoint pts)
{
    Mat pts_mat = pts;
    ellipse2Poly_0(center.x, center.y, axes.width, axes.height, angle, arcStart, arcEnd, delta, pts_mat.nativeObj);
    return;
}


//
// C++: void exp(Mat src, Mat& dst)
//

/**
* Calculates the exponent of every array element:
* dst[I] = e^(src(I)).
*
* <p>The maximum relative error is about 7e-6 for single-precision input and
* less than 1e-10 for double-precision input. Currently, the function
* converts denormalized values to zeros on output. Special values (NaN, Inf)
* are not handled.</p>
*
* @param src input array.
* @param dst output array of the same size and type as {@code src}.
*
* @see org.opencv.core.Core#log
* @see org.opencv.core.Core#pow
* @see org.opencv.core.Core#sqrt
*/
public static void exp(Mat src, Mat dst)
{
    exp_0(src.nativeObj, dst.nativeObj);
    return;
}


//
// C++: void extractChannel(Mat src, Mat& dst, int coi)
//

// Extracts the single channel with index coi from src into dst.
public static void extractChannel(Mat src, Mat dst, int coi)
{
    extractChannel_0(src.nativeObj, dst.nativeObj, coi);
    return;
}


//
// C++: float fastAtan2(float y, float x)
//

/**
* Calculates the angle of a 2D vector in degrees.
*
* <p>The function calculates the full-range angle of an input 2D vector. The
* angle is measured in degrees and varies from 0 to 360 degrees. The
* accuracy is about 0.3 degrees.</p>
*
* @param y y-coordinate of the vector.
* @param x x-coordinate of the vector.
* @return the angle in degrees, in [0, 360).
*/
public static float fastAtan2(float y, float x)
{
    float retVal = fastAtan2_0(y, x);
    return retVal;
}


//
// C++: void fillConvexPoly(Mat& img, vector_Point points, Scalar color, int lineType = 8, int shift = 0)
//

/**
* Fills a convex polygon.
*
* <p>This function is much faster than "fillPoly". It can fill not only
* convex polygons but any monotonic polygon without self-intersections, that
* is, a polygon whose contour intersects every horizontal line (scan line)
* twice at the most (though its top-most and/or bottom edge could be
* horizontal).</p>
*
* @param img Image.
* @param points Polygon vertices.
* @param color Polygon color.
* @param lineType Type of the polygon boundaries. See the "line" description.
* @param shift Number of fractional bits in the vertex coordinates.
*/
public static void fillConvexPoly(Mat img, MatOfPoint points, Scalar color, int lineType, int shift)
{
    Mat points_mat = points;
    fillConvexPoly_0(img.nativeObj, points_mat.nativeObj, color.val[0], color.val[1], color.val[2], color.val[3], lineType, shift);
    return;
}

/**
* Fills a convex polygon with default line type (8-connected) and no
* fractional bits.
*
* <p>This function is much faster than "fillPoly" and can fill any monotonic
* polygon without self-intersections.</p>
*
* @param img Image.
* @param points Polygon vertices.
* @param color Polygon color.
*/
public static void fillConvexPoly(Mat img, MatOfPoint points, Scalar color)
{
    Mat points_mat = points;
    fillConvexPoly_1(img.nativeObj, points_mat.nativeObj, color.val[0], color.val[1], color.val[2], color.val[3]);
    return;
}


//
// C++: void fillPoly(Mat& img, vector_vector_Point pts, Scalar color, int lineType = 8, int shift = 0, Point offset = Point())
//

/**

Fills the area bounded by one or more polygons.

* *

The function fillPoly fills an area bounded by several polygonal * contours. The function can fill complex areas, for example, areas with holes, * contours with self-intersections (some of their parts), and so forth.

* * @param img Image. * @param pts Array of polygons where each polygon is represented as an array of * points. * @param color Polygon color. * @param lineType Type of the polygon boundaries. See the "line" description. * @param shift Number of fractional bits in the vertex coordinates. * @param offset Optional offset of all points of the contours. * * @see org.opencv.core.Core.fillPoly */ public static void fillPoly(Mat img, List pts, Scalar color, int lineType, int shift, Point offset) { List pts_tmplm = new ArrayList((pts != null) ? pts.size() : 0); Mat pts_mat = Converters.vector_vector_Point_to_Mat(pts, pts_tmplm); fillPoly_0(img.nativeObj, pts_mat.nativeObj, color.val[0], color.val[1], color.val[2], color.val[3], lineType, shift, offset.x, offset.y); return; } /** *

Fills the area bounded by one or more polygons.

* *

The function fillPoly fills an area bounded by several polygonal * contours. The function can fill complex areas, for example, areas with holes, * contours with self-intersections (some of their parts), and so forth.

* * @param img Image. * @param pts Array of polygons where each polygon is represented as an array of * points. * @param color Polygon color. * * @see org.opencv.core.Core.fillPoly */ public static void fillPoly(Mat img, List pts, Scalar color) { List pts_tmplm = new ArrayList((pts != null) ? pts.size() : 0); Mat pts_mat = Converters.vector_vector_Point_to_Mat(pts, pts_tmplm); fillPoly_1(img.nativeObj, pts_mat.nativeObj, color.val[0], color.val[1], color.val[2], color.val[3]); return; } // // C++: void findNonZero(Mat src, Mat& idx) // public static void findNonZero(Mat src, Mat idx) { findNonZero_0(src.nativeObj, idx.nativeObj); return; } // // C++: void flip(Mat src, Mat& dst, int flipCode) // /** *
* Flips a 2D array around vertical, horizontal, or both axes.
*
* <p>The function flips the array in one of three different ways (row and
* column indices are 0-based):</p>
*
* <pre>
* dst(i,j) = src(src.rows-i-1, j)             if flipCode == 0
* dst(i,j) = src(i, src.cols-j-1)             if flipCode &gt; 0
* dst(i,j) = src(src.rows-i-1, src.cols-j-1)  if flipCode &lt; 0
* </pre>
*
* <p>Example scenarios of using the function:</p>
* <ul>
*   <li>Vertical flipping of the image (flipCode == 0) to switch between
*   top-left and bottom-left image origin (typical in video processing on
*   Microsoft Windows).</li>
*   <li>Horizontal flipping with subsequent horizontal shift and absolute
*   difference calculation to check for a vertical-axis symmetry
*   (flipCode &gt; 0).</li>
*   <li>Simultaneous horizontal and vertical flipping with subsequent shift
*   and absolute difference calculation to check for a central symmetry
*   (flipCode &lt; 0).</li>
*   <li>Reversing the order of point arrays (flipCode &gt; 0 or
*   flipCode == 0).</li>
* </ul>
*
* @param src input array.
* @param dst output array of the same size and type as {@code src}.
* @param flipCode a flag to specify how to flip the array: 0 means flipping
*        around the x-axis, positive means flipping around the y-axis and
*        negative means flipping around both axes.
*
* @see org.opencv.core.Core#repeat
* @see org.opencv.core.Core#transpose
* @see org.opencv.core.Core#completeSymm
*/
public static void flip(Mat src, Mat dst, int flipCode)
{
    flip_0(src.nativeObj, dst.nativeObj, flipCode);
    return;
}


//
// C++: void gemm(Mat src1, Mat src2, double alpha, Mat src3, double gamma, Mat& dst, int flags = 0)
//

/**
* Performs generalized matrix multiplication, similar to the gemm functions
* in BLAS level 3. For example,
* gemm(src1, src2, alpha, src3, beta, dst, GEMM_1_T + GEMM_3_T)
* corresponds to:
*
* <pre>
* // C++ code:
* dst = alpha*src1.t()*src2 + beta*src3.t();
* </pre>
*
* @param src1 first multiplied input matrix that should have CV_32FC1,
*        CV_64FC1, CV_32FC2, or CV_64FC2 type.
* @param src2 second multiplied input matrix of the same type as
*        {@code src1}.
* @param alpha weight of the matrix product.
* @param src3 third optional delta matrix added to the matrix product; it
*        should have the same type as {@code src1} and {@code src2}.
* @param gamma weight of {@code src3}.
* @param dst output matrix; it has the proper size and the same type as
*        input matrices.
* @param flags operation flags:
* <ul>
*   <li>GEMM_1_T transposes {@code src1}.</li>
*   <li>GEMM_2_T transposes {@code src2}.</li>
*   <li>GEMM_3_T transposes {@code src3}.</li>
* </ul>
*
* @see org.opencv.core.Core#mulTransposed
* @see org.opencv.core.Core#transform
*/
public static void gemm(Mat src1, Mat src2, double alpha, Mat src3, double gamma, Mat dst, int flags)
{
    gemm_0(src1.nativeObj, src2.nativeObj, alpha, src3.nativeObj, gamma, dst.nativeObj, flags);
    return;
}

/**
* Performs generalized matrix multiplication with no transposition flags:
* dst = alpha*src1*src2 + gamma*src3.
*
* @param src1 first multiplied input matrix that should have CV_32FC1,
*        CV_64FC1, CV_32FC2, or CV_64FC2 type.
* @param src2 second multiplied input matrix of the same type as
*        {@code src1}.
* @param alpha weight of the matrix product.
* @param src3 third optional delta matrix added to the matrix product; it
*        should have the same type as {@code src1} and {@code src2}.
* @param gamma weight of {@code src3}.
* @param dst output matrix; it has the proper size and the same type as
*        input matrices.
*
* @see org.opencv.core.Core#mulTransposed
* @see org.opencv.core.Core#transform
*/
public static void gemm(Mat src1, Mat src2, double alpha, Mat src3, double gamma, Mat dst)
{
    // flags defaults to 0 (no transposition) on the native side.
    gemm_1(src1.nativeObj, src2.nativeObj, alpha, src3.nativeObj, gamma, dst.nativeObj);
    return;
}


//
// C++: string getBuildInformation()
//

/**
* Returns the full configuration-time cmake output.
*
* <p>The returned value is raw cmake output including version control system
* revision, compiler version, compiler flags, enabled modules and third
* party libraries, etc. The output format depends on the target
* architecture.</p>
*
* @return the build configuration string.
*/
public static String getBuildInformation()
{
    String retVal = getBuildInformation_0();
    return retVal;
}


//
// C++: int64 getCPUTickCount()
//

/**
* Returns the number of CPU ticks.
*
* <p>The function returns the current number of CPU ticks on some
* architectures (such as x86, x64, PowerPC). On other platforms it is
* equivalent to getTickCount. It can be used for very accurate time
* measurements as well as for RNG initialization. Note that on multi-CPU
* systems a thread can be suspended and resumed on another CPU with its own
* counter, so subsequent calls do not necessarily return monotonically
* increasing values. Also, since a modern CPU varies its frequency depending
* on load, CPU clocks cannot be directly converted to time units; therefore
* getTickCount is generally preferable for measuring execution time.</p>
*
* @return the current CPU tick count.
*/
public static long getCPUTickCount()
{
    long retVal = getCPUTickCount_0();
    return retVal;
}


//
// C++: int getNumberOfCPUs()
//

/**
* Returns the number of logical CPUs available for the process.
*
* @return the logical CPU count.
*/
public static int getNumberOfCPUs()
{
    int retVal = getNumberOfCPUs_0();
    return retVal;
}


//
// C++: int getOptimalDFTSize(int vecsize)
//

/**
* Returns the optimal DFT size for a given vector size.
*
* <p>DFT performance is not a monotonic function of vector size. Therefore,
* when calculating a convolution of two arrays or performing spectral
* analysis, it usually makes sense to pad the input data with zeros to get a
* slightly larger array that can be transformed much faster. Arrays whose
* size is a power of two (2, 4, 8, 16, 32, ...) are the fastest to process;
* arrays whose size is a product of 2's, 3's and 5's (for example,
* 300 = 5*5*3*2*2) are also processed quite efficiently.</p>
*
* <p>The function returns the minimum number N &gt;= vecsize such that the
* DFT of a vector of size N can be processed efficiently; in the current
* implementation N = 2^p * 3^q * 5^r for some integers p, q, r.</p>
*
* <p>The function returns a negative number if {@code vecsize} is too large
* (very close to INT_MAX).</p>
*
* <p>While the function cannot be used directly to estimate the optimal
* vector size for a DCT transform (since the current DCT implementation
* supports only even-size vectors), it can be done as
* getOptimalDFTSize((vecsize+1)/2)*2.</p>
*
* @param vecsize vector size.
* @return the optimal DFT size, or a negative number on overflow.
*
* @see org.opencv.core.Core#dft
* @see org.opencv.core.Core#dct
* @see org.opencv.core.Core#idct
* @see org.opencv.core.Core#idft
*/
public static int getOptimalDFTSize(int vecsize)
{
    int retVal = getOptimalDFTSize_0(vecsize);
    return retVal;
}


//
// C++: int64 getTickCount()
//

/**
* Returns the number of ticks.
*
* <p>The function returns the number of ticks after a certain event (for
* example, when the machine was turned on). It can be used to initialize
* "RNG" or to measure a function's execution time by reading the tick count
* before and after the call. See also the tick frequency.</p>
*
* @return the current tick count.
*/
public static long getTickCount()
{
    long retVal = getTickCount_0();
    return retVal;
}


//
// C++: double getTickFrequency()
//

/**

Returns the number of ticks per second.

* *

The function returns the number of ticks per second.That is, the following * code computes the execution time in seconds:

* *

// C++ code:

* *

double t = (double)getTickCount();

* *

// do something...

* *

t = ((double)getTickCount() - t)/getTickFrequency();

* * @see org.opencv.core.Core.getTickFrequency */ public static double getTickFrequency() { double retVal = getTickFrequency_0(); return retVal; } // // C++: void hconcat(vector_Mat src, Mat& dst) // public static void hconcat(List src, Mat dst) { Mat src_mat = Converters.vector_Mat_to_Mat(src); hconcat_0(src_mat.nativeObj, dst.nativeObj); return; } // // C++: void idct(Mat src, Mat& dst, int flags = 0) // /** *

Calculates the inverse Discrete Cosine Transform of a 1D or 2D array.

* *

idct(src, dst, flags) is equivalent to dct(src, dst, flags * | DCT_INVERSE).

* * @param src input floating-point single-channel array. * @param dst output array of the same size and type as src. * @param flags operation flags. * * @see org.opencv.core.Core.idct * @see org.opencv.core.Core#dft * @see org.opencv.core.Core#dct * @see org.opencv.core.Core#getOptimalDFTSize * @see org.opencv.core.Core#idft */ public static void idct(Mat src, Mat dst, int flags) { idct_0(src.nativeObj, dst.nativeObj, flags); return; } /** *

Calculates the inverse Discrete Cosine Transform of a 1D or 2D array.

* *

idct(src, dst, flags) is equivalent to dct(src, dst, flags * | DCT_INVERSE).

* * @param src input floating-point single-channel array. * @param dst output array of the same size and type as src. * * @see org.opencv.core.Core.idct * @see org.opencv.core.Core#dft * @see org.opencv.core.Core#dct * @see org.opencv.core.Core#getOptimalDFTSize * @see org.opencv.core.Core#idft */ public static void idct(Mat src, Mat dst) { idct_1(src.nativeObj, dst.nativeObj); return; } // // C++: void idft(Mat src, Mat& dst, int flags = 0, int nonzeroRows = 0) // /** *

Calculates the inverse Discrete Fourier Transform of a 1D or 2D array.

* *

idft(src, dst, flags) is equivalent to dft(src, dst, flags * | DFT_INVERSE).

* *

See "dft" for details.

* *

Note: None of dft and idft scales the result by * default. So, you should pass DFT_SCALE to one of * dft or idft explicitly to make these transforms * mutually inverse.

* * @param src input floating-point real or complex array. * @param dst output array whose size and type depend on the flags. * @param flags operation flags (see "dft"). * @param nonzeroRows number of dst rows to process; the rest of * the rows have undefined content (see the convolution sample in "dft" * description. * * @see org.opencv.core.Core.idft * @see org.opencv.core.Core#dft * @see org.opencv.core.Core#dct * @see org.opencv.core.Core#getOptimalDFTSize * @see org.opencv.core.Core#idct * @see org.opencv.core.Core#mulSpectrums */ public static void idft(Mat src, Mat dst, int flags, int nonzeroRows) { idft_0(src.nativeObj, dst.nativeObj, flags, nonzeroRows); return; } /** *

Calculates the inverse Discrete Fourier Transform of a 1D or 2D array.

* *

idft(src, dst, flags) is equivalent to dft(src, dst, flags * | DFT_INVERSE).

* *

See "dft" for details.

* *

Note: None of dft and idft scales the result by * default. So, you should pass DFT_SCALE to one of * dft or idft explicitly to make these transforms * mutually inverse.

* * @param src input floating-point real or complex array. * @param dst output array whose size and type depend on the flags. * * @see org.opencv.core.Core.idft * @see org.opencv.core.Core#dft * @see org.opencv.core.Core#dct * @see org.opencv.core.Core#getOptimalDFTSize * @see org.opencv.core.Core#idct * @see org.opencv.core.Core#mulSpectrums */ public static void idft(Mat src, Mat dst) { idft_1(src.nativeObj, dst.nativeObj); return; } // // C++: void inRange(Mat src, Scalar lowerb, Scalar upperb, Mat& dst) // /** *

Checks if array elements lie between the elements of two other arrays.

* *

The function checks the range as follows:

*
    *
  • For every element of a single-channel input array: *
* *

dst(I)= lowerb(I)_0 <= src(I)_0 <= upperb(I)_0

* *
    *
  • For two-channel arrays: *
* *

dst(I)= lowerb(I)_0 <= src(I)_0 <= upperb(I)_0 land lowerb(I)_1 <= * src(I)_1 <= upperb(I)_1

* *
    *
  • and so forth. *
* *

That is, dst (I) is set to 255 (all 1 -bits) if * src (I) is within the specified 1D, 2D, 3D,... box and 0 * otherwise.

* *

When the lower and/or upper boundary parameters are scalars, the indexes * (I) at lowerb and upperb in the above * formulas should be omitted.

* * @param src first input array. * @param lowerb inclusive lower boundary array or a scalar. * @param upperb inclusive upper boundary array or a scalar. * @param dst output array of the same size as src and * CV_8U type. * * @see org.opencv.core.Core.inRange */ public static void inRange(Mat src, Scalar lowerb, Scalar upperb, Mat dst) { inRange_0(src.nativeObj, lowerb.val[0], lowerb.val[1], lowerb.val[2], lowerb.val[3], upperb.val[0], upperb.val[1], upperb.val[2], upperb.val[3], dst.nativeObj); return; } // // C++: void insertChannel(Mat src, Mat& dst, int coi) // public static void insertChannel(Mat src, Mat dst, int coi) { insertChannel_0(src.nativeObj, dst.nativeObj, coi); return; } // // C++: double invert(Mat src, Mat& dst, int flags = DECOMP_LU) // /** *

Finds the inverse or pseudo-inverse of a matrix.

* *

The function invert inverts the matrix src and * stores the result in dst. * When the matrix src is singular or non-square, the function * calculates the pseudo-inverse matrix (the dst matrix) so that * norm(src*dst - I) is minimal, where I is an identity matrix.

* *

In case of the DECOMP_LU method, the function returns non-zero * value if the inverse has been successfully calculated and 0 if * src is singular.

* *

In case of the DECOMP_SVD method, the function returns the * inverse condition number of src (the ratio of the smallest * singular value to the largest singular value) and 0 if src is * singular. The SVD method calculates a pseudo-inverse matrix if * src is singular.

* *

Similarly to DECOMP_LU, the method DECOMP_CHOLESKY * works only with non-singular square matrices that should also be symmetrical * and positively defined. In this case, the function stores the inverted matrix * in dst and returns non-zero. Otherwise, it returns 0.

* * @param src input floating-point M x N matrix. * @param dst output matrix of N x M size and the same type as * src. * @param flags inversion method : *
    *
  • DECOMP_LU Gaussian elimination with the optimal pivot element chosen. *
  • DECOMP_SVD singular value decomposition (SVD) method. *
  • DECOMP_CHOLESKY Cholesky decomposition; the matrix must be symmetrical * and positively defined. *
* * @see org.opencv.core.Core.invert * @see org.opencv.core.Core#solve */ public static double invert(Mat src, Mat dst, int flags) { double retVal = invert_0(src.nativeObj, dst.nativeObj, flags); return retVal; } /** *

Finds the inverse or pseudo-inverse of a matrix.

* *

The function invert inverts the matrix src and * stores the result in dst. * When the matrix src is singular or non-square, the function * calculates the pseudo-inverse matrix (the dst matrix) so that * norm(src*dst - I) is minimal, where I is an identity matrix.

* *

In case of the DECOMP_LU method, the function returns non-zero * value if the inverse has been successfully calculated and 0 if * src is singular.

* *

In case of the DECOMP_SVD method, the function returns the * inverse condition number of src (the ratio of the smallest * singular value to the largest singular value) and 0 if src is * singular. The SVD method calculates a pseudo-inverse matrix if * src is singular.

* *

Similarly to DECOMP_LU, the method DECOMP_CHOLESKY * works only with non-singular square matrices that should also be symmetrical * and positively defined. In this case, the function stores the inverted matrix * in dst and returns non-zero. Otherwise, it returns 0.

* * @param src input floating-point M x N matrix. * @param dst output matrix of N x M size and the same type as * src. * * @see org.opencv.core.Core.invert * @see org.opencv.core.Core#solve */ public static double invert(Mat src, Mat dst) { double retVal = invert_1(src.nativeObj, dst.nativeObj); return retVal; } // // C++: double kmeans(Mat data, int K, Mat& bestLabels, TermCriteria criteria, int attempts, int flags, Mat& centers = Mat()) // /** *

Finds centers of clusters and groups input samples around the clusters.

* *

The function kmeans implements a k-means algorithm that finds * the centers of cluster_count clusters and groups the input * samples around the clusters. As an output, labels_i contains a * 0-based cluster index for the sample stored in the i^(th) row of the * samples matrix.

* *

The function returns the compactness measure that is computed as

* *

sum _i|samples _i - centers _(labels _i)| ^2

* *

after every attempt. The best (minimum) value is chosen and the corresponding * labels and the compactness value are returned by the function. * Basically, you can use only the core of the function, set the number of * attempts to 1, initialize labels each time using a custom algorithm, pass * them with the (flags = KMEANS_USE_INITIAL_LABELS) * flag, and then choose the best (most-compact) clustering.

* *

Note:

*
    *
  • An example on K-means clustering can be found at opencv_source_code/samples/cpp/kmeans.cpp *
  • (Python) An example on K-means clustering can be found at * opencv_source_code/samples/python2/kmeans.py *
* * @param data Data for clustering. * @param K Number of clusters to split the set by. * @param bestLabels a bestLabels * @param criteria The algorithm termination criteria, that is, the maximum * number of iterations and/or the desired accuracy. The accuracy is specified * as criteria.epsilon. As soon as each of the cluster centers * moves by less than criteria.epsilon on some iteration, the * algorithm stops. * @param attempts Flag to specify the number of times the algorithm is executed * using different initial labellings. The algorithm returns the labels that * yield the best compactness (see the last function parameter). * @param flags Flag that can take the following values: *
    *
  • KMEANS_RANDOM_CENTERS Select random initial centers in each attempt. *
  • KMEANS_PP_CENTERS Use kmeans++ center initialization by * Arthur and Vassilvitskii [Arthur2007]. *
  • KMEANS_USE_INITIAL_LABELS During the first (and possibly the only) * attempt, use the user-supplied labels instead of computing them from the * initial centers. For the second and further attempts, use the random or * semi-random centers. Use one of KMEANS_*_CENTERS flag to specify * the exact method. *
* @param centers Output matrix of the cluster centers, one row per each cluster * center. * * @see org.opencv.core.Core.kmeans */ public static double kmeans(Mat data, int K, Mat bestLabels, TermCriteria criteria, int attempts, int flags, Mat centers) { double retVal = kmeans_0(data.nativeObj, K, bestLabels.nativeObj, criteria.type, criteria.maxCount, criteria.epsilon, attempts, flags, centers.nativeObj); return retVal; } /** *

Finds centers of clusters and groups input samples around the clusters.

* *

The function kmeans implements a k-means algorithm that finds * the centers of cluster_count clusters and groups the input * samples around the clusters. As an output, labels_i contains a * 0-based cluster index for the sample stored in the i^(th) row of the * samples matrix.

* *

The function returns the compactness measure that is computed as

* *

sum _i|samples _i - centers _(labels _i)| ^2

* *

after every attempt. The best (minimum) value is chosen and the corresponding * labels and the compactness value are returned by the function. * Basically, you can use only the core of the function, set the number of * attempts to 1, initialize labels each time using a custom algorithm, pass * them with the (flags = KMEANS_USE_INITIAL_LABELS) * flag, and then choose the best (most-compact) clustering.

* *

Note:

*
    *
  • An example on K-means clustering can be found at opencv_source_code/samples/cpp/kmeans.cpp *
  • (Python) An example on K-means clustering can be found at * opencv_source_code/samples/python2/kmeans.py *
* * @param data Data for clustering. * @param K Number of clusters to split the set by. * @param bestLabels a bestLabels * @param criteria The algorithm termination criteria, that is, the maximum * number of iterations and/or the desired accuracy. The accuracy is specified * as criteria.epsilon. As soon as each of the cluster centers * moves by less than criteria.epsilon on some iteration, the * algorithm stops. * @param attempts Flag to specify the number of times the algorithm is executed * using different initial labellings. The algorithm returns the labels that * yield the best compactness (see the last function parameter). * @param flags Flag that can take the following values: *
    *
  • KMEANS_RANDOM_CENTERS Select random initial centers in each attempt. *
  • KMEANS_PP_CENTERS Use kmeans++ center initialization by * Arthur and Vassilvitskii [Arthur2007]. *
  • KMEANS_USE_INITIAL_LABELS During the first (and possibly the only) * attempt, use the user-supplied labels instead of computing them from the * initial centers. For the second and further attempts, use the random or * semi-random centers. Use one of KMEANS_*_CENTERS flag to specify * the exact method. *
* * @see org.opencv.core.Core.kmeans */ public static double kmeans(Mat data, int K, Mat bestLabels, TermCriteria criteria, int attempts, int flags) { double retVal = kmeans_1(data.nativeObj, K, bestLabels.nativeObj, criteria.type, criteria.maxCount, criteria.epsilon, attempts, flags); return retVal; } // // C++: void line(Mat& img, Point pt1, Point pt2, Scalar color, int thickness = 1, int lineType = 8, int shift = 0) // /** *

Draws a line segment connecting two points.

* *

The function line draws the line segment between * pt1 and pt2 points in the image. The line is * clipped by the image boundaries. For non-antialiased lines with integer * coordinates, the 8-connected or 4-connected Bresenham algorithm is used. * Thick lines are drawn with rounding endings. * Antialiased lines are drawn using Gaussian filtering. To specify the line * color, you may use the macro CV_RGB(r, g, b).

* * @param img Image. * @param pt1 First point of the line segment. * @param pt2 Second point of the line segment. * @param color Line color. * @param thickness Line thickness. * @param lineType Type of the line: *
    *
  • 8 (or omitted) - 8-connected line. *
  • 4 - 4-connected line. *
  • CV_AA - antialiased line. *
* @param shift Number of fractional bits in the point coordinates. * * @see org.opencv.core.Core.line */ public static void line(Mat img, Point pt1, Point pt2, Scalar color, int thickness, int lineType, int shift) { line_0(img.nativeObj, pt1.x, pt1.y, pt2.x, pt2.y, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType, shift); return; } /** *

Draws a line segment connecting two points.

* *

The function line draws the line segment between * pt1 and pt2 points in the image. The line is * clipped by the image boundaries. For non-antialiased lines with integer * coordinates, the 8-connected or 4-connected Bresenham algorithm is used. * Thick lines are drawn with rounding endings. * Antialiased lines are drawn using Gaussian filtering. To specify the line * color, you may use the macro CV_RGB(r, g, b).

* * @param img Image. * @param pt1 First point of the line segment. * @param pt2 Second point of the line segment. * @param color Line color. * @param thickness Line thickness. * * @see org.opencv.core.Core.line */ public static void line(Mat img, Point pt1, Point pt2, Scalar color, int thickness) { line_1(img.nativeObj, pt1.x, pt1.y, pt2.x, pt2.y, color.val[0], color.val[1], color.val[2], color.val[3], thickness); return; } /** *

Draws a line segment connecting two points.

* *

The function line draws the line segment between * pt1 and pt2 points in the image. The line is * clipped by the image boundaries. For non-antialiased lines with integer * coordinates, the 8-connected or 4-connected Bresenham algorithm is used. * Thick lines are drawn with rounding endings. * Antialiased lines are drawn using Gaussian filtering. To specify the line * color, you may use the macro CV_RGB(r, g, b).

* * @param img Image. * @param pt1 First point of the line segment. * @param pt2 Second point of the line segment. * @param color Line color. * * @see org.opencv.core.Core.line */ public static void line(Mat img, Point pt1, Point pt2, Scalar color) { line_2(img.nativeObj, pt1.x, pt1.y, pt2.x, pt2.y, color.val[0], color.val[1], color.val[2], color.val[3]); return; } // // C++: void log(Mat src, Mat& dst) // /** *

Calculates the natural logarithm of every array element.

* *

The function log calculates the natural logarithm of the * absolute value of every element of the input array:

* *

dst(I) = log|src(I)| if src(I) != 0 ; C otherwise

* *

where C is a large negative number (about -700 in the current * implementation). * The maximum relative error is about 7e-6 for single-precision * input and less than 1e-10 for double-precision input. Special * values (NaN, Inf) are not handled.

* * @param src input array. * @param dst output array of the same size and type as src. * * @see org.opencv.core.Core.log * @see org.opencv.core.Core#cartToPolar * @see org.opencv.core.Core#pow * @see org.opencv.core.Core#sqrt * @see org.opencv.core.Core#magnitude * @see org.opencv.core.Core#polarToCart * @see org.opencv.core.Core#exp * @see org.opencv.core.Core#phase */ public static void log(Mat src, Mat dst) { log_0(src.nativeObj, dst.nativeObj); return; } // // C++: void magnitude(Mat x, Mat y, Mat& magnitude) // /** *

Calculates the magnitude of 2D vectors.

* *

The function magnitude calculates the magnitude of 2D vectors * formed from the corresponding elements of x and y * arrays:

* *

dst(I) = sqrt(x(I)^2 + y(I)^2)

* * @param x floating-point array of x-coordinates of the vectors. * @param y floating-point array of y-coordinates of the vectors; it must have * the same size as x. * @param magnitude output array of the same size and type as x. * * @see org.opencv.core.Core.magnitude * @see org.opencv.core.Core#cartToPolar * @see org.opencv.core.Core#phase * @see org.opencv.core.Core#sqrt * @see org.opencv.core.Core#polarToCart */ public static void magnitude(Mat x, Mat y, Mat magnitude) { magnitude_0(x.nativeObj, y.nativeObj, magnitude.nativeObj); return; } // // C++: void max(Mat src1, Mat src2, Mat& dst) // /** *

Calculates per-element maximum of two arrays or an array and a scalar.

* *

The functions max calculate the per-element maximum of two * arrays:

* *

dst(I)= max(src1(I), src2(I))

* *

or array and a scalar:

* *

dst(I)= max(src1(I), value)

* *

In the second variant, when the input array is multi-channel, each channel is * compared with value independently.

* *

The first 3 variants of the function listed above are actually a part of * "MatrixExpressions". They return an expression object that can be further * either transformed/ assigned to a matrix, or passed to a function, and so on.

* * @param src1 first input array. * @param src2 second input array of the same size and type as src1. * @param dst output array of the same size and type as src1. * * @see org.opencv.core.Core.max * @see org.opencv.core.Core#compare * @see org.opencv.core.Core#inRange * @see org.opencv.core.Core#minMaxLoc * @see org.opencv.core.Core#min */ public static void max(Mat src1, Mat src2, Mat dst) { max_0(src1.nativeObj, src2.nativeObj, dst.nativeObj); return; } // // C++: void max(Mat src1, Scalar src2, Mat& dst) // /** *

Calculates per-element maximum of two arrays or an array and a scalar.

* *

The functions max calculate the per-element maximum of two * arrays:

* *

dst(I)= max(src1(I), src2(I))

* *

or array and a scalar:

* *

dst(I)= max(src1(I), value)

* *

In the second variant, when the input array is multi-channel, each channel is * compared with value independently.

* *

The first 3 variants of the function listed above are actually a part of * "MatrixExpressions". They return an expression object that can be further * either transformed/ assigned to a matrix, or passed to a function, and so on.

* * @param src1 first input array. * @param src2 second input array of the same size and type as src1. * @param dst output array of the same size and type as src1. * * @see org.opencv.core.Core.max * @see org.opencv.core.Core#compare * @see org.opencv.core.Core#inRange * @see org.opencv.core.Core#minMaxLoc * @see org.opencv.core.Core#min */ public static void max(Mat src1, Scalar src2, Mat dst) { max_1(src1.nativeObj, src2.val[0], src2.val[1], src2.val[2], src2.val[3], dst.nativeObj); return; } // // C++: Scalar mean(Mat src, Mat mask = Mat()) // /** *

Calculates an average (mean) of array elements.

* *

The function mean calculates the mean value M of * array elements, independently for each channel, and return it:

* *

N = sum(by: I: mask(I) != 0) 1 * M_c = (sum(by: I: mask(I) != 0)(mtx(I)_c))/N

* *

When all the mask elements are 0's, the functions return Scalar.all(0).

* * @param src input array that should have from 1 to 4 channels so that the * result can be stored in "Scalar_". * @param mask optional operation mask. * * @see org.opencv.core.Core.mean * @see org.opencv.core.Core#countNonZero * @see org.opencv.core.Core#meanStdDev * @see org.opencv.core.Core#norm * @see org.opencv.core.Core#minMaxLoc */ public static Scalar mean(Mat src, Mat mask) { Scalar retVal = new Scalar(mean_0(src.nativeObj, mask.nativeObj)); return retVal; } /** *

Calculates an average (mean) of array elements.

* *

The function mean calculates the mean value M of * array elements, independently for each channel, and return it:

* *

N = sum(by: I: mask(I) != 0) 1 * M_c = (sum(by: I: mask(I) != 0)(mtx(I)_c))/N

* *

When all the mask elements are 0's, the functions return Scalar.all(0).

* * @param src input array that should have from 1 to 4 channels so that the * result can be stored in "Scalar_". * * @see org.opencv.core.Core.mean * @see org.opencv.core.Core#countNonZero * @see org.opencv.core.Core#meanStdDev * @see org.opencv.core.Core#norm * @see org.opencv.core.Core#minMaxLoc */ public static Scalar mean(Mat src) { Scalar retVal = new Scalar(mean_1(src.nativeObj)); return retVal; } // // C++: void meanStdDev(Mat src, vector_double& mean, vector_double& stddev, Mat mask = Mat()) // /** *

Calculates a mean and standard deviation of array elements.

* *

The function meanStdDev calculates the mean and the standard * deviation M of array elements independently for each channel and * returns it via the output parameters:

* *

N = sum(by: I, mask(I) != 0) 1 * mean _c = (sum_(I: mask(I) != 0) src(I)_c)/(N) * stddev _c = sqrt((sum_(I: mask(I) != 0)(src(I)_c - mean _c)^2)/(N))

* *

When all the mask elements are 0's, the functions return mean=stddev=Scalar.all(0).

* *

Note: The calculated standard deviation is only the diagonal of the complete * normalized covariance matrix. If the full matrix is needed, you can reshape * the multi-channel array M x N to the single-channel array * M*N x mtx.channels() (only possible when the matrix is * continuous) and then pass the matrix to "calcCovarMatrix".

* * @param src input array that should have from 1 to 4 channels so that the * results can be stored in "Scalar_" 's. * @param mean output parameter: calculated mean value. * @param stddev output parameter: calculateded standard deviation. * @param mask optional operation mask. * * @see org.opencv.core.Core.meanStdDev * @see org.opencv.core.Core#countNonZero * @see org.opencv.core.Core#calcCovarMatrix * @see org.opencv.core.Core#minMaxLoc * @see org.opencv.core.Core#norm * @see org.opencv.core.Core#mean */ public static void meanStdDev(Mat src, MatOfDouble mean, MatOfDouble stddev, Mat mask) { Mat mean_mat = mean; Mat stddev_mat = stddev; meanStdDev_0(src.nativeObj, mean_mat.nativeObj, stddev_mat.nativeObj, mask.nativeObj); return; } /** *

Calculates a mean and standard deviation of array elements.

* *

The function meanStdDev calculates the mean and the standard * deviation M of array elements independently for each channel and * returns it via the output parameters:

* *

N = sum(by: I, mask(I) != 0) 1 * mean _c = (sum_(I: mask(I) != 0) src(I)_c)/(N) * stddev _c = sqrt((sum_(I: mask(I) != 0)(src(I)_c - mean _c)^2)/(N))

* *

When all the mask elements are 0's, the functions return mean=stddev=Scalar.all(0).

* *

Note: The calculated standard deviation is only the diagonal of the complete * normalized covariance matrix. If the full matrix is needed, you can reshape * the multi-channel array M x N to the single-channel array * M*N x mtx.channels() (only possible when the matrix is * continuous) and then pass the matrix to "calcCovarMatrix".

* * @param src input array that should have from 1 to 4 channels so that the * results can be stored in "Scalar_" 's. * @param mean output parameter: calculated mean value. * @param stddev output parameter: calculateded standard deviation. * * @see org.opencv.core.Core.meanStdDev * @see org.opencv.core.Core#countNonZero * @see org.opencv.core.Core#calcCovarMatrix * @see org.opencv.core.Core#minMaxLoc * @see org.opencv.core.Core#norm * @see org.opencv.core.Core#mean */ public static void meanStdDev(Mat src, MatOfDouble mean, MatOfDouble stddev) { Mat mean_mat = mean; Mat stddev_mat = stddev; meanStdDev_1(src.nativeObj, mean_mat.nativeObj, stddev_mat.nativeObj); return; } // // C++: void merge(vector_Mat mv, Mat& dst) // /** *

Creates one multichannel array out of several single-channel ones.

* *

The functions merge merge several arrays to make a single * multi-channel array. That is, each element of the output array will be a * concatenation of the elements of the input arrays, where elements of i-th * input array are treated as mv[i].channels()-element vectors.

* *

The function "split" does the reverse operation. If you need to shuffle * channels in some other advanced way, use "mixChannels".

* * @param mv input array or vector of matrices to be merged; all the matrices in * mv must have the same size and the same depth. * @param dst output array of the same size and the same depth as * mv[0]; The number of channels will be the total number of * channels in the matrix array. * * @see org.opencv.core.Core.merge * @see org.opencv.core.Mat#reshape * @see org.opencv.core.Core#mixChannels * @see org.opencv.core.Core#split */ public static void merge(List mv, Mat dst) { Mat mv_mat = Converters.vector_Mat_to_Mat(mv); merge_0(mv_mat.nativeObj, dst.nativeObj); return; } // // C++: void min(Mat src1, Mat src2, Mat& dst) // /** *

Calculates per-element minimum of two arrays or an array and a scalar.

* *

The functions min calculate the per-element minimum of two * arrays:

* *

dst(I)= min(src1(I), src2(I))

* *

or array and a scalar:

* *

dst(I)= min(src1(I), value)

* *

In the second variant, when the input array is multi-channel, each channel is * compared with value independently.

* *

The first three variants of the function listed above are actually a part of * "MatrixExpressions". They return the expression object that can be further * either transformed/assigned to a matrix, or passed to a function, and so on.

* * @param src1 first input array. * @param src2 second input array of the same size and type as src1. * @param dst output array of the same size and type as src1. * * @see org.opencv.core.Core.min * @see org.opencv.core.Core#max * @see org.opencv.core.Core#compare * @see org.opencv.core.Core#inRange * @see org.opencv.core.Core#minMaxLoc */ public static void min(Mat src1, Mat src2, Mat dst) { min_0(src1.nativeObj, src2.nativeObj, dst.nativeObj); return; } // // C++: void min(Mat src1, Scalar src2, Mat& dst) // /** *

Calculates per-element minimum of two arrays or an array and a scalar.

* *

The functions min calculate the per-element minimum of two * arrays:

* *

dst(I)= min(src1(I), src2(I))

* *

or array and a scalar:

* *

dst(I)= min(src1(I), value)

* *

In the second variant, when the input array is multi-channel, each channel is * compared with value independently.

* *

The first three variants of the function listed above are actually a part of * "MatrixExpressions". They return the expression object that can be further * either transformed/assigned to a matrix, or passed to a function, and so on.

* * @param src1 first input array. * @param src2 second input array of the same size and type as src1. * @param dst output array of the same size and type as src1. * * @see org.opencv.core.Core.min * @see org.opencv.core.Core#max * @see org.opencv.core.Core#compare * @see org.opencv.core.Core#inRange * @see org.opencv.core.Core#minMaxLoc */ public static void min(Mat src1, Scalar src2, Mat dst) { min_1(src1.nativeObj, src2.val[0], src2.val[1], src2.val[2], src2.val[3], dst.nativeObj); return; } // // C++: void mixChannels(vector_Mat src, vector_Mat dst, vector_int fromTo) // /** *

Copies specified channels from input arrays to the specified channels of * output arrays.

* *

The functions mixChannels provide an advanced mechanism for * shuffling image channels.

* *

"split" and "merge" and some forms of "cvtColor" are partial cases of * mixChannels. * In the example below, the code splits a 4-channel RGBA image into a 3-channel * BGR (with R and B channels swapped) and a separate alpha-channel image: *

* *

// C++ code:

* *

Mat rgba(100, 100, CV_8UC4, Scalar(1,2,3,4));

* *

Mat bgr(rgba.rows, rgba.cols, CV_8UC3);

* *

Mat alpha(rgba.rows, rgba.cols, CV_8UC1);

* *

// forming an array of matrices is a quite efficient operation,

* *

// because the matrix data is not copied, only the headers

* *

Mat out[] = { bgr, alpha };

* *

// rgba[0] -> bgr[2], rgba[1] -> bgr[1],

* *

// rgba[2] -> bgr[0], rgba[3] -> alpha[0]

* *

int from_to[] = { 0,2, 1,1, 2,0, 3,3 };

* *

mixChannels(&rgba, 1, out, 2, from_to, 4);

* *

Note: Unlike many other new-style C++ functions in OpenCV (see the * introduction section and "Mat.create"), mixChannels requires * the output arrays to be pre-allocated before calling the function. *

* * @param src input array or vector of matricesl; all of the matrices must have * the same size and the same depth. * @param dst output array or vector of matrices; all the matrices *must be * allocated*; their size and depth must be the same as in src[0]. * @param fromTo array of index pairs specifying which channels are copied and * where; fromTo[k*2] is a 0-based index of the input channel in * src, fromTo[k*2+1] is an index of the output * channel in dst; the continuous channel numbering is used: the * first input image channels are indexed from 0 to * src[0].channels()-1, the second input image channels are indexed * from src[0].channels() to src[0].channels() + * src[1].channels()-1, and so on, the same scheme is used for the output * image channels; as a special case, when fromTo[k*2] is negative, * the corresponding output channel is filled with zero. * * @see org.opencv.core.Core.mixChannels * @see org.opencv.core.Core#merge * @see org.opencv.core.Core#split * @see org.opencv.imgproc.Imgproc#cvtColor */ public static void mixChannels(List src, List dst, MatOfInt fromTo) { Mat src_mat = Converters.vector_Mat_to_Mat(src); Mat dst_mat = Converters.vector_Mat_to_Mat(dst); Mat fromTo_mat = fromTo; mixChannels_0(src_mat.nativeObj, dst_mat.nativeObj, fromTo_mat.nativeObj); return; } // // C++: void mulSpectrums(Mat a, Mat b, Mat& c, int flags, bool conjB = false) // /** *

Performs the per-element multiplication of two Fourier spectrums.

* *

The function mulSpectrums performs the per-element * multiplication of the two CCS-packed or complex matrices that are results of * a real or complex Fourier transform.

* *

The function, together with "dft" and "idft", may be used to calculate * convolution (pass conjB=false) or correlation (pass * conjB=true) of two arrays rapidly. When the arrays are complex, * they are simply multiplied (per element) with an optional conjugation of the * second-array elements. When the arrays are real, they are assumed to be * CCS-packed (see "dft" for details).

* * @param a a a * @param b a b * @param c a c * @param flags operation flags; currently, the only supported flag is * DFT_ROWS, which indicates that each row of src1 and * src2 is an independent 1D Fourier spectrum. * @param conjB optional flag that conjugates the second input array before the * multiplication (true) or not (false). * * @see org.opencv.core.Core.mulSpectrums */ public static void mulSpectrums(Mat a, Mat b, Mat c, int flags, boolean conjB) { mulSpectrums_0(a.nativeObj, b.nativeObj, c.nativeObj, flags, conjB); return; } /** *

Performs the per-element multiplication of two Fourier spectrums.

* *

The function mulSpectrums performs the per-element * multiplication of the two CCS-packed or complex matrices that are results of * a real or complex Fourier transform.

* *

The function, together with "dft" and "idft", may be used to calculate * convolution (pass conjB=false) or correlation (pass * conjB=true) of two arrays rapidly. When the arrays are complex, * they are simply multiplied (per element) with an optional conjugation of the * second-array elements. When the arrays are real, they are assumed to be * CCS-packed (see "dft" for details).

* * @param a a a * @param b a b * @param c a c * @param flags operation flags; currently, the only supported flag is * DFT_ROWS, which indicates that each row of src1 and * src2 is an independent 1D Fourier spectrum. * * @see org.opencv.core.Core.mulSpectrums */ public static void mulSpectrums(Mat a, Mat b, Mat c, int flags) { mulSpectrums_1(a.nativeObj, b.nativeObj, c.nativeObj, flags); return; } // // C++: void mulTransposed(Mat src, Mat& dst, bool aTa, Mat delta = Mat(), double scale = 1, int dtype = -1) // /** *

Calculates the product of a matrix and its transposition.

* *

The function mulTransposed calculates the product of * src and its transposition:

* *

dst = scale(src - delta)^T(src - delta)

* *

if aTa=true, and

* *

dst = scale(src - delta)(src - delta)^T

* *

otherwise. The function is used to calculate the covariance matrix. With zero delta, it can be used as a faster substitute for the general matrix product A*B when B = A'.

* * @param src input single-channel matrix. Note that unlike "gemm", the function * can multiply not only floating-point matrices. * @param dst output square matrix. * @param aTa Flag specifying the multiplication ordering. See the description * below. * @param delta Optional delta matrix subtracted from src before * the multiplication. When the matrix is empty (delta=noArray()), * it is assumed to be zero, that is, nothing is subtracted. If it has the same * size as src, it is simply subtracted. Otherwise, it is * "repeated" (see "repeat") to cover the full src and then * subtracted. Type of the delta matrix, when it is not empty, must be the same * as the type of created output matrix. See the dtype parameter * description below. * @param scale Optional scale factor for the matrix product. * @param dtype Optional type of the output matrix. When it is negative, the * output matrix will have the same type as src. Otherwise, it will * be type=CV_MAT_DEPTH(dtype) that should be either * CV_32F or CV_64F. * * @see org.opencv.core.Core.mulTransposed * @see org.opencv.core.Core#calcCovarMatrix * @see org.opencv.core.Core#repeat * @see org.opencv.core.Core#reduce * @see org.opencv.core.Core#gemm */ public static void mulTransposed(Mat src, Mat dst, boolean aTa, Mat delta, double scale, int dtype) { mulTransposed_0(src.nativeObj, dst.nativeObj, aTa, delta.nativeObj, scale, dtype); return; } /** *

Calculates the product of a matrix and its transposition.

* *

The function mulTransposed calculates the product of * src and its transposition:

* *

dst = scale(src - delta)^T(src - delta)

* *

if aTa=true, and

* *

dst = scale(src - delta)(src - delta)^T

* *

otherwise. The function is used to calculate the covariance matrix. With zero * delta, it can be used as a faster substitute for general matrix product * A*B when B=A'

* * @param src input single-channel matrix. Note that unlike "gemm", the function * can multiply not only floating-point matrices. * @param dst output square matrix. * @param aTa Flag specifying the multiplication ordering. See the description * below. * @param delta Optional delta matrix subtracted from src before * the multiplication. When the matrix is empty (delta=noArray()), * it is assumed to be zero, that is, nothing is subtracted. If it has the same * size as src, it is simply subtracted. Otherwise, it is * "repeated" (see "repeat") to cover the full src and then * subtracted. Type of the delta matrix, when it is not empty, must be the same * as the type of created output matrix. See the dtype parameter * description below. * @param scale Optional scale factor for the matrix product. * * @see org.opencv.core.Core.mulTransposed * @see org.opencv.core.Core#calcCovarMatrix * @see org.opencv.core.Core#repeat * @see org.opencv.core.Core#reduce * @see org.opencv.core.Core#gemm */ public static void mulTransposed(Mat src, Mat dst, boolean aTa, Mat delta, double scale) { mulTransposed_1(src.nativeObj, dst.nativeObj, aTa, delta.nativeObj, scale); return; } /** *

Calculates the product of a matrix and its transposition.

* *

The function mulTransposed calculates the product of * src and its transposition:

* *

dst = scale(src - delta)^T(src - delta)

* *

if aTa=true, and

* *

dst = scale(src - delta)(src - delta)^T

* *

otherwise. The function is used to calculate the covariance matrix. With zero * delta, it can be used as a faster substitute for general matrix product * A*B when B=A'

* * @param src input single-channel matrix. Note that unlike "gemm", the function * can multiply not only floating-point matrices. * @param dst output square matrix. * @param aTa Flag specifying the multiplication ordering. See the description * below. * * @see org.opencv.core.Core.mulTransposed * @see org.opencv.core.Core#calcCovarMatrix * @see org.opencv.core.Core#repeat * @see org.opencv.core.Core#reduce * @see org.opencv.core.Core#gemm */ public static void mulTransposed(Mat src, Mat dst, boolean aTa) { mulTransposed_2(src.nativeObj, dst.nativeObj, aTa); return; } // // C++: void multiply(Mat src1, Mat src2, Mat& dst, double scale = 1, int dtype = -1) // /** *

Calculates the per-element scaled product of two arrays.

* *

The function multiply calculates the per-element product of two * arrays:

* *

dst(I)= saturate(scale * src1(I) * src2(I))

* *

There is also a "MatrixExpressions" -friendly variant of the first function. * See "Mat.mul".

* *

For a not-per-element matrix product, see "gemm".

* *

Note: Saturation is not applied when the output array has the depth * CV_32S. You may even get result of an incorrect sign in the case * of overflow.

* * @param src1 first input array. * @param src2 second input array of the same size and the same type as * src1. * @param dst output array of the same size and type as src1. * @param scale optional scale factor. * @param dtype a dtype * * @see org.opencv.core.Core.multiply * @see org.opencv.core.Core#divide * @see org.opencv.core.Mat#convertTo * @see org.opencv.core.Core#addWeighted * @see org.opencv.core.Core#add * @see org.opencv.imgproc.Imgproc#accumulateSquare * @see org.opencv.imgproc.Imgproc#accumulate * @see org.opencv.core.Core#scaleAdd * @see org.opencv.core.Core#subtract * @see org.opencv.imgproc.Imgproc#accumulateProduct */ public static void multiply(Mat src1, Mat src2, Mat dst, double scale, int dtype) { multiply_0(src1.nativeObj, src2.nativeObj, dst.nativeObj, scale, dtype); return; } /** *

Calculates the per-element scaled product of two arrays.

* *

The function multiply calculates the per-element product of two * arrays:

* *

dst(I)= saturate(scale * src1(I) * src2(I))

* *

There is also a "MatrixExpressions" -friendly variant of the first function. * See "Mat.mul".

* *

For a not-per-element matrix product, see "gemm".

* *

Note: Saturation is not applied when the output array has the depth * CV_32S. You may even get result of an incorrect sign in the case * of overflow.

* * @param src1 first input array. * @param src2 second input array of the same size and the same type as * src1. * @param dst output array of the same size and type as src1. * @param scale optional scale factor. * * @see org.opencv.core.Core.multiply * @see org.opencv.core.Core#divide * @see org.opencv.core.Mat#convertTo * @see org.opencv.core.Core#addWeighted * @see org.opencv.core.Core#add * @see org.opencv.imgproc.Imgproc#accumulateSquare * @see org.opencv.imgproc.Imgproc#accumulate * @see org.opencv.core.Core#scaleAdd * @see org.opencv.core.Core#subtract * @see org.opencv.imgproc.Imgproc#accumulateProduct */ public static void multiply(Mat src1, Mat src2, Mat dst, double scale) { multiply_1(src1.nativeObj, src2.nativeObj, dst.nativeObj, scale); return; } /** *

Calculates the per-element scaled product of two arrays.

* *

The function multiply calculates the per-element product of two * arrays:

* *

dst(I)= saturate(scale * src1(I) * src2(I))

* *

There is also a "MatrixExpressions" -friendly variant of the first function. * See "Mat.mul".

* *

For a not-per-element matrix product, see "gemm".

* *

Note: Saturation is not applied when the output array has the depth * CV_32S. You may even get result of an incorrect sign in the case * of overflow.

* * @param src1 first input array. * @param src2 second input array of the same size and the same type as * src1. * @param dst output array of the same size and type as src1. * * @see org.opencv.core.Core.multiply * @see org.opencv.core.Core#divide * @see org.opencv.core.Mat#convertTo * @see org.opencv.core.Core#addWeighted * @see org.opencv.core.Core#add * @see org.opencv.imgproc.Imgproc#accumulateSquare * @see org.opencv.imgproc.Imgproc#accumulate * @see org.opencv.core.Core#scaleAdd * @see org.opencv.core.Core#subtract * @see org.opencv.imgproc.Imgproc#accumulateProduct */ public static void multiply(Mat src1, Mat src2, Mat dst) { multiply_2(src1.nativeObj, src2.nativeObj, dst.nativeObj); return; } // // C++: void multiply(Mat src1, Scalar src2, Mat& dst, double scale = 1, int dtype = -1) // /** *

Calculates the per-element scaled product of two arrays.

* *

The function multiply calculates the per-element product of two * arrays:

* *

dst(I)= saturate(scale * src1(I) * src2(I))

* *

There is also a "MatrixExpressions" -friendly variant of the first function. * See "Mat.mul".

* *

For a not-per-element matrix product, see "gemm".

* *

Note: Saturation is not applied when the output array has the depth * CV_32S. You may even get result of an incorrect sign in the case * of overflow.

* * @param src1 first input array. * @param src2 second input array of the same size and the same type as * src1. * @param dst output array of the same size and type as src1. * @param scale optional scale factor. * @param dtype a dtype * * @see org.opencv.core.Core.multiply * @see org.opencv.core.Core#divide * @see org.opencv.core.Mat#convertTo * @see org.opencv.core.Core#addWeighted * @see org.opencv.core.Core#add * @see org.opencv.imgproc.Imgproc#accumulateSquare * @see org.opencv.imgproc.Imgproc#accumulate * @see org.opencv.core.Core#scaleAdd * @see org.opencv.core.Core#subtract * @see org.opencv.imgproc.Imgproc#accumulateProduct */ public static void multiply(Mat src1, Scalar src2, Mat dst, double scale, int dtype) { multiply_3(src1.nativeObj, src2.val[0], src2.val[1], src2.val[2], src2.val[3], dst.nativeObj, scale, dtype); return; } /** *

Calculates the per-element scaled product of two arrays.

* *

The function multiply calculates the per-element product of two * arrays:

* *

dst(I)= saturate(scale * src1(I) * src2(I))

* *

There is also a "MatrixExpressions" -friendly variant of the first function. * See "Mat.mul".

* *

For a not-per-element matrix product, see "gemm".

* *

Note: Saturation is not applied when the output array has the depth * CV_32S. You may even get result of an incorrect sign in the case * of overflow.

* * @param src1 first input array. * @param src2 second input array of the same size and the same type as * src1. * @param dst output array of the same size and type as src1. * @param scale optional scale factor. * * @see org.opencv.core.Core.multiply * @see org.opencv.core.Core#divide * @see org.opencv.core.Mat#convertTo * @see org.opencv.core.Core#addWeighted * @see org.opencv.core.Core#add * @see org.opencv.imgproc.Imgproc#accumulateSquare * @see org.opencv.imgproc.Imgproc#accumulate * @see org.opencv.core.Core#scaleAdd * @see org.opencv.core.Core#subtract * @see org.opencv.imgproc.Imgproc#accumulateProduct */ public static void multiply(Mat src1, Scalar src2, Mat dst, double scale) { multiply_4(src1.nativeObj, src2.val[0], src2.val[1], src2.val[2], src2.val[3], dst.nativeObj, scale); return; } /** *

Calculates the per-element scaled product of two arrays.

* *

The function multiply calculates the per-element product of two * arrays:

* *

dst(I)= saturate(scale * src1(I) * src2(I))

* *

There is also a "MatrixExpressions" -friendly variant of the first function. * See "Mat.mul".

* *

For a not-per-element matrix product, see "gemm".

* *

Note: Saturation is not applied when the output array has the depth * CV_32S. You may even get result of an incorrect sign in the case * of overflow.

* * @param src1 first input array. * @param src2 second input array of the same size and the same type as * src1. * @param dst output array of the same size and type as src1. * * @see org.opencv.core.Core.multiply * @see org.opencv.core.Core#divide * @see org.opencv.core.Mat#convertTo * @see org.opencv.core.Core#addWeighted * @see org.opencv.core.Core#add * @see org.opencv.imgproc.Imgproc#accumulateSquare * @see org.opencv.imgproc.Imgproc#accumulate * @see org.opencv.core.Core#scaleAdd * @see org.opencv.core.Core#subtract * @see org.opencv.imgproc.Imgproc#accumulateProduct */ public static void multiply(Mat src1, Scalar src2, Mat dst) { multiply_5(src1.nativeObj, src2.val[0], src2.val[1], src2.val[2], src2.val[3], dst.nativeObj); return; } // // C++: double norm(Mat src1, int normType = NORM_L2, Mat mask = Mat()) // /** *

Calculates an absolute array norm, an absolute difference norm, or a relative * difference norm.

* *

The functions norm calculate an absolute norm of * src1 (when there is no src2):

* *

norm = ||src1||_Linf = max_I |src1(I)|   (if normType = NORM_INF);
norm = ||src1||_L1   = sum_I |src1(I)|   (if normType = NORM_L1);
norm = ||src1||_L2   = sqrt(sum_I src1(I)^2)   (if normType = NORM_L2)

* *

or an absolute or relative difference norm if src2 is there:

* *

norm = forkthree(|src1-src2|_(L_(infty)) = max _I|src1(I) - src2(I)|)(if * normType = NORM_INF)<BR>(|src1 - src2|_(L_1) = sum _I|src1(I) - * src2(I)|)(if normType = NORM_L1)<BR>(|src1 - src2|_(L_2) = * sqrt(sum_I(src1(I) - src2(I))^2))(if normType = NORM_L2)

* *

or

* *

norm = forkthree((|src1-src2|_(L_(infty)))/(|src2|_(L_(infty))))(if * normType = NORM_RELATIVE_INF)<BR>((|src1-src2|_(L_1))/(|src2|_(L_1)))(if * normType = NORM_RELATIVE_L1)<BR>((|src1-src2|_(L_2))/(|src2|_(L_2)))(if * normType = NORM_RELATIVE_L2)

* *

The functions norm return the calculated norm.

* *

When the mask parameter is specified and it is not empty, the * norm is calculated only over the region specified by the mask.

* *

A multi-channel input arrays are treated as a single-channel, that is, the * results for all channels are combined.

* * @param src1 first input array. * @param normType type of the norm (see the details below). * @param mask optional operation mask; it must have the same size as * src1 and CV_8UC1 type. * * @see org.opencv.core.Core.norm */ public static double norm(Mat src1, int normType, Mat mask) { double retVal = norm_0(src1.nativeObj, normType, mask.nativeObj); return retVal; } /** *

Calculates an absolute array norm, an absolute difference norm, or a relative * difference norm.

* *

The functions norm calculate an absolute norm of * src1 (when there is no src2):

* *

norm = forkthree(|src1|_(L_(infty)) = max _I|src1(I)|)(if normType = * NORM_INF)<BR>(|src1|_(L_1) = sum _I|src1(I)|)(if normType = * NORM_L1)<BR>(|src1|_(L_2) = sqrt(sum_I src1(I)^2))(if normType = * NORM_L2)

* *

or an absolute or relative difference norm if src2 is there:

* *

norm = forkthree(|src1-src2|_(L_(infty)) = max _I|src1(I) - src2(I)|)(if * normType = NORM_INF)<BR>(|src1 - src2|_(L_1) = sum _I|src1(I) - * src2(I)|)(if normType = NORM_L1)<BR>(|src1 - src2|_(L_2) = * sqrt(sum_I(src1(I) - src2(I))^2))(if normType = NORM_L2)

* *

or

* *

norm = forkthree((|src1-src2|_(L_(infty)))/(|src2|_(L_(infty))))(if * normType = NORM_RELATIVE_INF)<BR>((|src1-src2|_(L_1))/(|src2|_(L_1)))(if * normType = NORM_RELATIVE_L1)<BR>((|src1-src2|_(L_2))/(|src2|_(L_2)))(if * normType = NORM_RELATIVE_L2)

* *

The functions norm return the calculated norm.

* *

When the mask parameter is specified and it is not empty, the * norm is calculated only over the region specified by the mask.

* *

A multi-channel input arrays are treated as a single-channel, that is, the * results for all channels are combined.

* * @param src1 first input array. * @param normType type of the norm (see the details below). * * @see org.opencv.core.Core.norm */ public static double norm(Mat src1, int normType) { double retVal = norm_1(src1.nativeObj, normType); return retVal; } /** *

Calculates an absolute array norm, an absolute difference norm, or a relative * difference norm.

* *

The functions norm calculate an absolute norm of * src1 (when there is no src2):

* *

norm = forkthree(|src1|_(L_(infty)) = max _I|src1(I)|)(if normType = * NORM_INF)<BR>(|src1|_(L_1) = sum _I|src1(I)|)(if normType = * NORM_L1)<BR>(|src1|_(L_2) = sqrt(sum_I src1(I)^2))(if normType = * NORM_L2)

* *

or an absolute or relative difference norm if src2 is there:

* *

norm = forkthree(|src1-src2|_(L_(infty)) = max _I|src1(I) - src2(I)|)(if * normType = NORM_INF)<BR>(|src1 - src2|_(L_1) = sum _I|src1(I) - * src2(I)|)(if normType = NORM_L1)<BR>(|src1 - src2|_(L_2) = * sqrt(sum_I(src1(I) - src2(I))^2))(if normType = NORM_L2)

* *

or

* *

norm = forkthree((|src1-src2|_(L_(infty)))/(|src2|_(L_(infty))))(if * normType = NORM_RELATIVE_INF)<BR>((|src1-src2|_(L_1))/(|src2|_(L_1)))(if * normType = NORM_RELATIVE_L1)<BR>((|src1-src2|_(L_2))/(|src2|_(L_2)))(if * normType = NORM_RELATIVE_L2)

* *

The functions norm return the calculated norm.

* *

When the mask parameter is specified and it is not empty, the * norm is calculated only over the region specified by the mask.

* *

A multi-channel input arrays are treated as a single-channel, that is, the * results for all channels are combined.

* * @param src1 first input array. * * @see org.opencv.core.Core.norm */ public static double norm(Mat src1) { double retVal = norm_2(src1.nativeObj); return retVal; } // // C++: double norm(Mat src1, Mat src2, int normType = NORM_L2, Mat mask = Mat()) // /** *

Calculates an absolute array norm, an absolute difference norm, or a relative * difference norm.

* *

The functions norm calculate an absolute norm of * src1 (when there is no src2):

* *

norm = forkthree(|src1|_(L_(infty)) = max _I|src1(I)|)(if normType = * NORM_INF)<BR>(|src1|_(L_1) = sum _I|src1(I)|)(if normType = * NORM_L1)<BR>(|src1|_(L_2) = sqrt(sum_I src1(I)^2))(if normType = * NORM_L2)

* *

or an absolute or relative difference norm if src2 is there:

* *

norm = forkthree(|src1-src2|_(L_(infty)) = max _I|src1(I) - src2(I)|)(if * normType = NORM_INF)<BR>(|src1 - src2|_(L_1) = sum _I|src1(I) - * src2(I)|)(if normType = NORM_L1)<BR>(|src1 - src2|_(L_2) = * sqrt(sum_I(src1(I) - src2(I))^2))(if normType = NORM_L2)

* *

or

* *

norm = forkthree((|src1-src2|_(L_(infty)))/(|src2|_(L_(infty))))(if * normType = NORM_RELATIVE_INF)<BR>((|src1-src2|_(L_1))/(|src2|_(L_1)))(if * normType = NORM_RELATIVE_L1)<BR>((|src1-src2|_(L_2))/(|src2|_(L_2)))(if * normType = NORM_RELATIVE_L2)

* *

The functions norm return the calculated norm.

* *

When the mask parameter is specified and it is not empty, the * norm is calculated only over the region specified by the mask.

* *

A multi-channel input arrays are treated as a single-channel, that is, the * results for all channels are combined.

* * @param src1 first input array. * @param src2 second input array of the same size and the same type as * src1. * @param normType type of the norm (see the details below). * @param mask optional operation mask; it must have the same size as * src1 and CV_8UC1 type. * * @see org.opencv.core.Core.norm */ public static double norm(Mat src1, Mat src2, int normType, Mat mask) { double retVal = norm_3(src1.nativeObj, src2.nativeObj, normType, mask.nativeObj); return retVal; } /** *

Calculates an absolute array norm, an absolute difference norm, or a relative * difference norm.

* *

The functions norm calculate an absolute norm of * src1 (when there is no src2):

* *

norm = forkthree(|src1|_(L_(infty)) = max _I|src1(I)|)(if normType = * NORM_INF)<BR>(|src1|_(L_1) = sum _I|src1(I)|)(if normType = * NORM_L1)<BR>(|src1|_(L_2) = sqrt(sum_I src1(I)^2))(if normType = * NORM_L2)

* *

or an absolute or relative difference norm if src2 is there:

* *

norm = forkthree(|src1-src2|_(L_(infty)) = max _I|src1(I) - src2(I)|)(if * normType = NORM_INF)<BR>(|src1 - src2|_(L_1) = sum _I|src1(I) - * src2(I)|)(if normType = NORM_L1)<BR>(|src1 - src2|_(L_2) = * sqrt(sum_I(src1(I) - src2(I))^2))(if normType = NORM_L2)

* *

or

* *

norm = forkthree((|src1-src2|_(L_(infty)))/(|src2|_(L_(infty))))(if * normType = NORM_RELATIVE_INF)<BR>((|src1-src2|_(L_1))/(|src2|_(L_1)))(if * normType = NORM_RELATIVE_L1)<BR>((|src1-src2|_(L_2))/(|src2|_(L_2)))(if * normType = NORM_RELATIVE_L2)

* *

The functions norm return the calculated norm.

* *

When the mask parameter is specified and it is not empty, the * norm is calculated only over the region specified by the mask.

* *

A multi-channel input arrays are treated as a single-channel, that is, the * results for all channels are combined.

* * @param src1 first input array. * @param src2 second input array of the same size and the same type as * src1. * @param normType type of the norm (see the details below). * * @see org.opencv.core.Core.norm */ public static double norm(Mat src1, Mat src2, int normType) { double retVal = norm_4(src1.nativeObj, src2.nativeObj, normType); return retVal; } /** *

Calculates an absolute array norm, an absolute difference norm, or a relative * difference norm.

* *

The functions norm calculate an absolute norm of * src1 (when there is no src2):

* *

norm = forkthree(|src1|_(L_(infty)) = max _I|src1(I)|)(if normType = * NORM_INF)<BR>(|src1|_(L_1) = sum _I|src1(I)|)(if normType = * NORM_L1)<BR>(|src1|_(L_2) = sqrt(sum_I src1(I)^2))(if normType = * NORM_L2)

* *

or an absolute or relative difference norm if src2 is there:

* *

norm = forkthree(|src1-src2|_(L_(infty)) = max _I|src1(I) - src2(I)|)(if * normType = NORM_INF)<BR>(|src1 - src2|_(L_1) = sum _I|src1(I) - * src2(I)|)(if normType = NORM_L1)<BR>(|src1 - src2|_(L_2) = * sqrt(sum_I(src1(I) - src2(I))^2))(if normType = NORM_L2)

* *

or

* *

norm = forkthree((|src1-src2|_(L_(infty)))/(|src2|_(L_(infty))))(if * normType = NORM_RELATIVE_INF)<BR>((|src1-src2|_(L_1))/(|src2|_(L_1)))(if * normType = NORM_RELATIVE_L1)<BR>((|src1-src2|_(L_2))/(|src2|_(L_2)))(if * normType = NORM_RELATIVE_L2)

* *

The functions norm return the calculated norm.

* *

When the mask parameter is specified and it is not empty, the * norm is calculated only over the region specified by the mask.

* *

A multi-channel input arrays are treated as a single-channel, that is, the * results for all channels are combined.

* * @param src1 first input array. * @param src2 second input array of the same size and the same type as * src1. * * @see org.opencv.core.Core.norm */ public static double norm(Mat src1, Mat src2) { double retVal = norm_5(src1.nativeObj, src2.nativeObj); return retVal; } // // C++: void normalize(Mat src, Mat& dst, double alpha = 1, double beta = 0, int norm_type = NORM_L2, int dtype = -1, Mat mask = Mat()) // /** *

Normalizes the norm or value range of an array.

* *

The functions normalize scale and shift the input array elements * so that

* *

| dst|_(L_p)= alpha

* *

(where p=Inf, 1 or 2) when normType=NORM_INF, NORM_L1, * or NORM_L2, respectively; or so that

* *

min _I dst(I)= alpha, max _I dst(I)= beta

* *

when normType=NORM_MINMAX (for dense arrays only). * The optional mask specifies a sub-array to be normalized. This means that the * norm or min-n-max are calculated over the sub-array, and then this sub-array * is modified to be normalized. If you want to only use the mask to calculate * the norm or min-max but modify the whole array, you can use "norm" and * "Mat.convertTo".

* *

In case of sparse matrices, only the non-zero values are analyzed and * transformed. Because of this, the range transformation for sparse matrices is * not allowed since it can shift the zero level.

* * @param src input array. * @param dst output array of the same size as src. * @param alpha norm value to normalize to or the lower range boundary in case * of the range normalization. * @param beta upper range boundary in case of the range normalization; it is * not used for the norm normalization. * @param norm_type a norm_type * @param dtype when negative, the output array has the same type as * src; otherwise, it has the same number of channels as * src and the depth =CV_MAT_DEPTH(dtype). * @param mask optional operation mask. * * @see org.opencv.core.Core.normalize * @see org.opencv.core.Mat#convertTo * @see org.opencv.core.Core#norm */ public static void normalize(Mat src, Mat dst, double alpha, double beta, int norm_type, int dtype, Mat mask) { normalize_0(src.nativeObj, dst.nativeObj, alpha, beta, norm_type, dtype, mask.nativeObj); return; } /** *

Normalizes the norm or value range of an array.

* *

The functions normalize scale and shift the input array elements * so that

* *

| dst|_(L_p)= alpha

* *

(where p=Inf, 1 or 2) when normType=NORM_INF, NORM_L1, * or NORM_L2, respectively; or so that

* *

min _I dst(I)= alpha, max _I dst(I)= beta

* *

when normType=NORM_MINMAX (for dense arrays only). * The optional mask specifies a sub-array to be normalized. This means that the * norm or min-n-max are calculated over the sub-array, and then this sub-array * is modified to be normalized. If you want to only use the mask to calculate * the norm or min-max but modify the whole array, you can use "norm" and * "Mat.convertTo".

* *

In case of sparse matrices, only the non-zero values are analyzed and * transformed. Because of this, the range transformation for sparse matrices is * not allowed since it can shift the zero level.

* * @param src input array. * @param dst output array of the same size as src. * @param alpha norm value to normalize to or the lower range boundary in case * of the range normalization. * @param beta upper range boundary in case of the range normalization; it is * not used for the norm normalization. * @param norm_type a norm_type * @param dtype when negative, the output array has the same type as * src; otherwise, it has the same number of channels as * src and the depth =CV_MAT_DEPTH(dtype). * * @see org.opencv.core.Core.normalize * @see org.opencv.core.Mat#convertTo * @see org.opencv.core.Core#norm */ public static void normalize(Mat src, Mat dst, double alpha, double beta, int norm_type, int dtype) { normalize_1(src.nativeObj, dst.nativeObj, alpha, beta, norm_type, dtype); return; } /** *

Normalizes the norm or value range of an array.

* *

The functions normalize scale and shift the input array elements * so that

* *

| dst|_(L_p)= alpha

* *

(where p=Inf, 1 or 2) when normType=NORM_INF, NORM_L1, * or NORM_L2, respectively; or so that

* *

min _I dst(I)= alpha, max _I dst(I)= beta

* *

when normType=NORM_MINMAX (for dense arrays only). * The optional mask specifies a sub-array to be normalized. This means that the * norm or min-n-max are calculated over the sub-array, and then this sub-array * is modified to be normalized. If you want to only use the mask to calculate * the norm or min-max but modify the whole array, you can use "norm" and * "Mat.convertTo".

* *

In case of sparse matrices, only the non-zero values are analyzed and * transformed. Because of this, the range transformation for sparse matrices is * not allowed since it can shift the zero level.

* * @param src input array. * @param dst output array of the same size as src. * @param alpha norm value to normalize to or the lower range boundary in case * of the range normalization. * @param beta upper range boundary in case of the range normalization; it is * not used for the norm normalization. * @param norm_type a norm_type * * @see org.opencv.core.Core.normalize * @see org.opencv.core.Mat#convertTo * @see org.opencv.core.Core#norm */ public static void normalize(Mat src, Mat dst, double alpha, double beta, int norm_type) { normalize_2(src.nativeObj, dst.nativeObj, alpha, beta, norm_type); return; } /** *

Normalizes the norm or value range of an array.

* *

The functions normalize scale and shift the input array elements * so that

* *

| dst|_(L_p)= alpha

* *

(where p=Inf, 1 or 2) when normType=NORM_INF, NORM_L1, * or NORM_L2, respectively; or so that

* *

min _I dst(I)= alpha, max _I dst(I)= beta

* *

when normType=NORM_MINMAX (for dense arrays only). * The optional mask specifies a sub-array to be normalized. This means that the * norm or min-n-max are calculated over the sub-array, and then this sub-array * is modified to be normalized. If you want to only use the mask to calculate * the norm or min-max but modify the whole array, you can use "norm" and * "Mat.convertTo".

* *

In case of sparse matrices, only the non-zero values are analyzed and * transformed. Because of this, the range transformation for sparse matrices is * not allowed since it can shift the zero level.

* * @param src input array. * @param dst output array of the same size as src. * * @see org.opencv.core.Core.normalize * @see org.opencv.core.Mat#convertTo * @see org.opencv.core.Core#norm */ public static void normalize(Mat src, Mat dst) { normalize_3(src.nativeObj, dst.nativeObj); return; } // // C++: void patchNaNs(Mat& a, double val = 0) // public static void patchNaNs(Mat a, double val) { patchNaNs_0(a.nativeObj, val); return; } public static void patchNaNs(Mat a) { patchNaNs_1(a.nativeObj); return; } // // C++: void perspectiveTransform(Mat src, Mat& dst, Mat m) // /** *

Performs the perspective matrix transformation of vectors.

* *

The function perspectiveTransform transforms every element of * src by treating it as a 2D or 3D vector, in the following way:

* *

(x, y, z) -> (x'/w, y'/w, z'/w)

* *

where

* *

(x', y', z', w') = mat * x y z 1

* *

and

* *

w = w' if w' != 0; infty otherwise

* *

Here a 3D vector transformation is shown. In case of a 2D vector * transformation, the z component is omitted.

* *

Note: The function transforms a sparse set of 2D or 3D vectors. If you want * to transform an image using perspective transformation, use "warpPerspective". * If you have an inverse problem, that is, you want to compute the most * probable perspective transformation out of several pairs of corresponding * points, you can use "getPerspectiveTransform" or "findHomography".

* * @param src input two-channel or three-channel floating-point array; each * element is a 2D/3D vector to be transformed. * @param dst output array of the same size and type as src. * @param m 3x3 or 4x4 floating-point transformation * matrix. * * @see org.opencv.core.Core.perspectiveTransform * @see org.opencv.imgproc.Imgproc#warpPerspective * @see org.opencv.core.Core#transform * @see org.opencv.imgproc.Imgproc#getPerspectiveTransform */ public static void perspectiveTransform(Mat src, Mat dst, Mat m) { perspectiveTransform_0(src.nativeObj, dst.nativeObj, m.nativeObj); return; } // // C++: void phase(Mat x, Mat y, Mat& angle, bool angleInDegrees = false) // /** *

Calculates the rotation angle of 2D vectors.

* *

The function phase calculates the rotation angle of each 2D * vector that is formed from the corresponding elements of x and * y :

* *

angle(I) = atan2(y(I), x(I))

* *

The angle estimation accuracy is about 0.3 degrees. When x(I)=y(I)=0, * the corresponding angle(I) is set to 0.

* * @param x input floating-point array of x-coordinates of 2D vectors. * @param y input array of y-coordinates of 2D vectors; it must have the same * size and the same type as x. * @param angle output array of vector angles; it has the same size and same * type as x. * @param angleInDegrees when true, the function calculates the angle in * degrees, otherwise, they are measured in radians. * * @see org.opencv.core.Core.phase */ public static void phase(Mat x, Mat y, Mat angle, boolean angleInDegrees) { phase_0(x.nativeObj, y.nativeObj, angle.nativeObj, angleInDegrees); return; } /** *

Calculates the rotation angle of 2D vectors.

* *

The function phase calculates the rotation angle of each 2D * vector that is formed from the corresponding elements of x and * y :

* *

angle(I) = atan2(y(I), x(I))

* *

The angle estimation accuracy is about 0.3 degrees. When x(I)=y(I)=0, * the corresponding angle(I) is set to 0.

* * @param x input floating-point array of x-coordinates of 2D vectors. * @param y input array of y-coordinates of 2D vectors; it must have the same * size and the same type as x. * @param angle output array of vector angles; it has the same size and same * type as x. * * @see org.opencv.core.Core.phase */ public static void phase(Mat x, Mat y, Mat angle) { phase_1(x.nativeObj, y.nativeObj, angle.nativeObj); return; } // // C++: void polarToCart(Mat magnitude, Mat angle, Mat& x, Mat& y, bool angleInDegrees = false) // /** *

Calculates x and y coordinates of 2D vectors from their magnitude and angle.

* *

The function polarToCart calculates the Cartesian coordinates of * each 2D vector represented by the corresponding elements of magnitude * and angle :

* *

x(I) = magnitude(I) cos(angle(I)) * y(I) = magnitude(I) sin(angle(I)) *

* *

The relative accuracy of the estimated coordinates is about 1e-6.

* * @param magnitude input floating-point array of magnitudes of 2D vectors; it * can be an empty matrix (=Mat()), in this case, the function * assumes that all the magnitudes are =1; if it is not empty, it must have the * same size and type as angle. * @param angle input floating-point array of angles of 2D vectors. * @param x output array of x-coordinates of 2D vectors; it has the same size * and type as angle. * @param y output array of y-coordinates of 2D vectors; it has the same size * and type as angle. * @param angleInDegrees when true, the input angles are measured in degrees, * otherwise, they are measured in radians. * * @see org.opencv.core.Core.polarToCart * @see org.opencv.core.Core#log * @see org.opencv.core.Core#cartToPolar * @see org.opencv.core.Core#pow * @see org.opencv.core.Core#sqrt * @see org.opencv.core.Core#magnitude * @see org.opencv.core.Core#exp * @see org.opencv.core.Core#phase */ public static void polarToCart(Mat magnitude, Mat angle, Mat x, Mat y, boolean angleInDegrees) { polarToCart_0(magnitude.nativeObj, angle.nativeObj, x.nativeObj, y.nativeObj, angleInDegrees); return; } /** *

Calculates x and y coordinates of 2D vectors from their magnitude and angle.

* *

The function polarToCart calculates the Cartesian coordinates of * each 2D vector represented by the corresponding elements of magnitude * and angle :

* *

x(I) = magnitude(I) cos(angle(I)) * y(I) = magnitude(I) sin(angle(I)) *

* *

The relative accuracy of the estimated coordinates is about 1e-6.

* * @param magnitude input floating-point array of magnitudes of 2D vectors; it * can be an empty matrix (=Mat()), in this case, the function * assumes that all the magnitudes are =1; if it is not empty, it must have the * same size and type as angle. * @param angle input floating-point array of angles of 2D vectors. * @param x output array of x-coordinates of 2D vectors; it has the same size * and type as angle. * @param y output array of y-coordinates of 2D vectors; it has the same size * and type as angle. * * @see org.opencv.core.Core.polarToCart * @see org.opencv.core.Core#log * @see org.opencv.core.Core#cartToPolar * @see org.opencv.core.Core#pow * @see org.opencv.core.Core#sqrt * @see org.opencv.core.Core#magnitude * @see org.opencv.core.Core#exp * @see org.opencv.core.Core#phase */ public static void polarToCart(Mat magnitude, Mat angle, Mat x, Mat y) { polarToCart_1(magnitude.nativeObj, angle.nativeObj, x.nativeObj, y.nativeObj); return; } // // C++: void polylines(Mat& img, vector_vector_Point pts, bool isClosed, Scalar color, int thickness = 1, int lineType = 8, int shift = 0) // /** *

Draws several polygonal curves.

* *

The function polylines draws one or more polygonal curves.

* * @param img Image. * @param pts Array of polygonal curves. * @param isClosed Flag indicating whether the drawn polylines are closed or * not. If they are closed, the function draws a line from the last vertex of * each curve to its first vertex. * @param color Polyline color. * @param thickness Thickness of the polyline edges. * @param lineType Type of the line segments. See the "line" description. * @param shift Number of fractional bits in the vertex coordinates. * * @see org.opencv.core.Core.polylines */ public static void polylines(Mat img, List pts, boolean isClosed, Scalar color, int thickness, int lineType, int shift) { List pts_tmplm = new ArrayList((pts != null) ? pts.size() : 0); Mat pts_mat = Converters.vector_vector_Point_to_Mat(pts, pts_tmplm); polylines_0(img.nativeObj, pts_mat.nativeObj, isClosed, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType, shift); return; } /** *

Draws several polygonal curves.

* *

The function polylines draws one or more polygonal curves.

* * @param img Image. * @param pts Array of polygonal curves. * @param isClosed Flag indicating whether the drawn polylines are closed or * not. If they are closed, the function draws a line from the last vertex of * each curve to its first vertex. * @param color Polyline color. * @param thickness Thickness of the polyline edges. * * @see org.opencv.core.Core.polylines */ public static void polylines(Mat img, List pts, boolean isClosed, Scalar color, int thickness) { List pts_tmplm = new ArrayList((pts != null) ? pts.size() : 0); Mat pts_mat = Converters.vector_vector_Point_to_Mat(pts, pts_tmplm); polylines_1(img.nativeObj, pts_mat.nativeObj, isClosed, color.val[0], color.val[1], color.val[2], color.val[3], thickness); return; } /** *

Draws several polygonal curves.

* *

The function polylines draws one or more polygonal curves.

* * @param img Image. * @param pts Array of polygonal curves. * @param isClosed Flag indicating whether the drawn polylines are closed or * not. If they are closed, the function draws a line from the last vertex of * each curve to its first vertex. * @param color Polyline color. * * @see org.opencv.core.Core.polylines */ public static void polylines(Mat img, List pts, boolean isClosed, Scalar color) { List pts_tmplm = new ArrayList((pts != null) ? pts.size() : 0); Mat pts_mat = Converters.vector_vector_Point_to_Mat(pts, pts_tmplm); polylines_2(img.nativeObj, pts_mat.nativeObj, isClosed, color.val[0], color.val[1], color.val[2], color.val[3]); return; } // // C++: void pow(Mat src, double power, Mat& dst) // /** *

Raises every array element to a power.

* *

The function pow raises every element of the input array to * power :

* *

dst(I) = src(I)^power if power is integer; |src(I)|^power * otherwise<BR>So, for a non-integer power exponent, the absolute values of * input array elements are used. However, it is possible to get true values for * negative values using some extra operations. In the example below, computing * the 5th root of array src shows: <BR><code>

* *

// C++ code:

* *

Mat mask = src < 0;

* *

pow(src, 1./5, dst);

* *

subtract(Scalar.all(0), dst, dst, mask);

* *

For some values of power, such as integer values, 0.5 and -0.5, * specialized faster algorithms are used. *

* *

Special values (NaN, Inf) are not handled.

* * @param src input array. * @param power exponent of power. * @param dst output array of the same size and type as src. * * @see org.opencv.core.Core.pow * @see org.opencv.core.Core#cartToPolar * @see org.opencv.core.Core#polarToCart * @see org.opencv.core.Core#exp * @see org.opencv.core.Core#sqrt * @see org.opencv.core.Core#log */ public static void pow(Mat src, double power, Mat dst) { pow_0(src.nativeObj, power, dst.nativeObj); return; } // // C++: void putText(Mat img, string text, Point org, int fontFace, double fontScale, Scalar color, int thickness = 1, int lineType = 8, bool bottomLeftOrigin = false) // /** *

Draws a text string.

* *

The function putText renders the specified text string in the * image. * Symbols that cannot be rendered using the specified font are replaced by * question marks. See "getTextSize" for a text rendering code example.

* * @param img Image. * @param text Text string to be drawn. * @param org Bottom-left corner of the text string in the image. * @param fontFace Font type. One of FONT_HERSHEY_SIMPLEX, * FONT_HERSHEY_PLAIN, FONT_HERSHEY_DUPLEX, * FONT_HERSHEY_COMPLEX, FONT_HERSHEY_TRIPLEX, * FONT_HERSHEY_COMPLEX_SMALL, FONT_HERSHEY_SCRIPT_SIMPLEX, * or FONT_HERSHEY_SCRIPT_COMPLEX, where each of the font ID's can * be combined with FONT_HERSHEY_ITALIC to get the slanted letters. * @param fontScale Font scale factor that is multiplied by the font-specific * base size. * @param color Text color. * @param thickness Thickness of the lines used to draw a text. * @param lineType Line type. See the line for details. * @param bottomLeftOrigin When true, the image data origin is at the * bottom-left corner. Otherwise, it is at the top-left corner. * * @see org.opencv.core.Core.putText */ public static void putText(Mat img, String text, Point org, int fontFace, double fontScale, Scalar color, int thickness, int lineType, boolean bottomLeftOrigin) { putText_0(img.nativeObj, text, org.x, org.y, fontFace, fontScale, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType, bottomLeftOrigin); return; } /** *

Draws a text string.

* *

The function putText renders the specified text string in the * image. * Symbols that cannot be rendered using the specified font are replaced by * question marks. See "getTextSize" for a text rendering code example.

* * @param img Image. * @param text Text string to be drawn. * @param org Bottom-left corner of the text string in the image. * @param fontFace Font type. One of FONT_HERSHEY_SIMPLEX, * FONT_HERSHEY_PLAIN, FONT_HERSHEY_DUPLEX, * FONT_HERSHEY_COMPLEX, FONT_HERSHEY_TRIPLEX, * FONT_HERSHEY_COMPLEX_SMALL, FONT_HERSHEY_SCRIPT_SIMPLEX, * or FONT_HERSHEY_SCRIPT_COMPLEX, where each of the font ID's can * be combined with FONT_HERSHEY_ITALIC to get the slanted letters. * @param fontScale Font scale factor that is multiplied by the font-specific * base size. * @param color Text color. * @param thickness Thickness of the lines used to draw a text. * * @see org.opencv.core.Core.putText */ public static void putText(Mat img, String text, Point org, int fontFace, double fontScale, Scalar color, int thickness) { putText_1(img.nativeObj, text, org.x, org.y, fontFace, fontScale, color.val[0], color.val[1], color.val[2], color.val[3], thickness); return; } /** *

Draws a text string.

* *

The function putText renders the specified text string in the * image. * Symbols that cannot be rendered using the specified font are replaced by * question marks. See "getTextSize" for a text rendering code example.

* * @param img Image. * @param text Text string to be drawn. * @param org Bottom-left corner of the text string in the image. * @param fontFace Font type. One of FONT_HERSHEY_SIMPLEX, * FONT_HERSHEY_PLAIN, FONT_HERSHEY_DUPLEX, * FONT_HERSHEY_COMPLEX, FONT_HERSHEY_TRIPLEX, * FONT_HERSHEY_COMPLEX_SMALL, FONT_HERSHEY_SCRIPT_SIMPLEX, * or FONT_HERSHEY_SCRIPT_COMPLEX, where each of the font ID's can * be combined with FONT_HERSHEY_ITALIC to get the slanted letters. * @param fontScale Font scale factor that is multiplied by the font-specific * base size. * @param color Text color. * * @see org.opencv.core.Core.putText */ public static void putText(Mat img, String text, Point org, int fontFace, double fontScale, Scalar color) { putText_2(img.nativeObj, text, org.x, org.y, fontFace, fontScale, color.val[0], color.val[1], color.val[2], color.val[3]); return; } // // C++: void randShuffle_(Mat& dst, double iterFactor = 1.) // public static void randShuffle(Mat dst, double iterFactor) { randShuffle_0(dst.nativeObj, iterFactor); return; } public static void randShuffle(Mat dst) { randShuffle_1(dst.nativeObj); return; } // // C++: void randn(Mat& dst, double mean, double stddev) // /** *

Fills the array with normally distributed random numbers.

* *

The function randn fills the matrix dst with * normally distributed random numbers with the specified mean vector and the * standard deviation matrix. The generated random numbers are clipped to fit * the value range of the output array data type.

* * @param dst output array of random numbers; the array must be pre-allocated * and have 1 to 4 channels. * @param mean mean value (expectation) of the generated random numbers. * @param stddev standard deviation of the generated random numbers; it can be * either a vector (in which case a diagonal standard deviation matrix is * assumed) or a square matrix. * * @see org.opencv.core.Core.randn * @see org.opencv.core.Core#randu */ public static void randn(Mat dst, double mean, double stddev) { randn_0(dst.nativeObj, mean, stddev); return; } // // C++: void randu(Mat& dst, double low, double high) // /** *

Generates a single uniformly-distributed random number or an array of random * numbers.

* *

The template functions randu generate and return the next * uniformly-distributed random value of the specified type. randu() * is an equivalent to (int)theRNG();, and so on. See "RNG" * description.

* *

The second non-template variant of the function fills the matrix * dst with uniformly-distributed random numbers from the specified * range:

* *

low _c <= dst(I)_c < high _c

* * @param dst output array of random numbers; the array must be pre-allocated. * @param low inclusive lower boundary of the generated random numbers. * @param high exclusive upper boundary of the generated random numbers. * * @see org.opencv.core.Core.randu * @see org.opencv.core.Core#randn */ public static void randu(Mat dst, double low, double high) { randu_0(dst.nativeObj, low, high); return; } // // C++: void rectangle(Mat& img, Point pt1, Point pt2, Scalar color, int thickness = 1, int lineType = 8, int shift = 0) // /** *

Draws a simple, thick, or filled up-right rectangle.

* *

The function rectangle draws a rectangle outline or a filled * rectangle whose two opposite corners are pt1 and * pt2, or r.tl() and r.br()-Point(1,1).

* * @param img Image. * @param pt1 Vertex of the rectangle. * @param pt2 Vertex of the rectangle opposite to pt1. * @param color Rectangle color or brightness (grayscale image). * @param thickness Thickness of lines that make up the rectangle. Negative * values, like CV_FILLED, mean that the function has to draw a * filled rectangle. * @param lineType Type of the line. See the "line" description. * @param shift Number of fractional bits in the point coordinates. * * @see org.opencv.core.Core.rectangle */ public static void rectangle(Mat img, Point pt1, Point pt2, Scalar color, int thickness, int lineType, int shift) { rectangle_0(img.nativeObj, pt1.x, pt1.y, pt2.x, pt2.y, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType, shift); return; } /** *

Draws a simple, thick, or filled up-right rectangle.

* *

The function rectangle draws a rectangle outline or a filled * rectangle whose two opposite corners are pt1 and * pt2, or r.tl() and r.br()-Point(1,1).

* * @param img Image. * @param pt1 Vertex of the rectangle. * @param pt2 Vertex of the rectangle opposite to pt1. * @param color Rectangle color or brightness (grayscale image). * @param thickness Thickness of lines that make up the rectangle. Negative * values, like CV_FILLED, mean that the function has to draw a * filled rectangle. * * @see org.opencv.core.Core.rectangle */ public static void rectangle(Mat img, Point pt1, Point pt2, Scalar color, int thickness) { rectangle_1(img.nativeObj, pt1.x, pt1.y, pt2.x, pt2.y, color.val[0], color.val[1], color.val[2], color.val[3], thickness); return; } /** *

Draws a simple, thick, or filled up-right rectangle.

* *

The function rectangle draws a rectangle outline or a filled * rectangle whose two opposite corners are pt1 and * pt2, or r.tl() and r.br()-Point(1,1).

* * @param img Image. * @param pt1 Vertex of the rectangle. * @param pt2 Vertex of the rectangle opposite to pt1. * @param color Rectangle color or brightness (grayscale image). * * @see org.opencv.core.Core.rectangle */ public static void rectangle(Mat img, Point pt1, Point pt2, Scalar color) { rectangle_2(img.nativeObj, pt1.x, pt1.y, pt2.x, pt2.y, color.val[0], color.val[1], color.val[2], color.val[3]); return; } // // C++: void reduce(Mat src, Mat& dst, int dim, int rtype, int dtype = -1) // /** *

Reduces a matrix to a vector.

* *

The function reduce reduces the matrix to a vector by treating * the matrix rows/columns as a set of 1D vectors and performing the specified * operation on the vectors until a single row/column is obtained. For example, * the function can be used to compute horizontal and vertical projections of a * raster image. In case of CV_REDUCE_SUM and CV_REDUCE_AVG, * the output may have a larger element bit-depth to preserve accuracy. And * multi-channel arrays are also supported in these two reduction modes.

* * @param src input 2D matrix. * @param dst output vector. Its size and type is defined by dim * and dtype parameters. * @param dim dimension index along which the matrix is reduced. 0 means that * the matrix is reduced to a single row. 1 means that the matrix is reduced to * a single column. * @param rtype reduction operation that could be one of the following: *
    *
  • CV_REDUCE_SUM: the output is the sum of all rows/columns of the * matrix. *
  • CV_REDUCE_AVG: the output is the mean vector of all rows/columns of * the matrix. *
  • CV_REDUCE_MAX: the output is the maximum (column/row-wise) of all * rows/columns of the matrix. *
  • CV_REDUCE_MIN: the output is the minimum (column/row-wise) of all * rows/columns of the matrix. *
* @param dtype when negative, the output vector will have the same type as the * input matrix, otherwise, its type will be CV_MAKE_TYPE(CV_MAT_DEPTH(dtype), * src.channels()). * * @see org.opencv.core.Core.reduce * @see org.opencv.core.Core#repeat */ public static void reduce(Mat src, Mat dst, int dim, int rtype, int dtype) { reduce_0(src.nativeObj, dst.nativeObj, dim, rtype, dtype); return; } /** *

Reduces a matrix to a vector.

* *

The function reduce reduces the matrix to a vector by treating * the matrix rows/columns as a set of 1D vectors and performing the specified * operation on the vectors until a single row/column is obtained. For example, * the function can be used to compute horizontal and vertical projections of a * raster image. In case of CV_REDUCE_SUM and CV_REDUCE_AVG, * the output may have a larger element bit-depth to preserve accuracy. And * multi-channel arrays are also supported in these two reduction modes.

* * @param src input 2D matrix. * @param dst output vector. Its size and type is defined by dim * and dtype parameters. * @param dim dimension index along which the matrix is reduced. 0 means that * the matrix is reduced to a single row. 1 means that the matrix is reduced to * a single column. * @param rtype reduction operation that could be one of the following: *
    *
  • CV_REDUCE_SUM: the output is the sum of all rows/columns of the * matrix. *
  • CV_REDUCE_AVG: the output is the mean vector of all rows/columns of * the matrix. *
  • CV_REDUCE_MAX: the output is the maximum (column/row-wise) of all * rows/columns of the matrix. *
  • CV_REDUCE_MIN: the output is the minimum (column/row-wise) of all * rows/columns of the matrix. *
* * @see org.opencv.core.Core.reduce * @see org.opencv.core.Core#repeat */ public static void reduce(Mat src, Mat dst, int dim, int rtype) { reduce_1(src.nativeObj, dst.nativeObj, dim, rtype); return; } // // C++: void repeat(Mat src, int ny, int nx, Mat& dst) // /** *

Fills the output array with repeated copies of the input array.

* *

The functions "repeat" duplicate the input array one or more times along each * of the two axes:

* *

dst _(ij)= src _(i mod src.rows, j mod src.cols)

* *

The second variant of the function is more convenient to use with * "MatrixExpressions".

* * @param src input array to replicate. * @param ny Flag to specify how many times the src is repeated * along the vertical axis. * @param nx Flag to specify how many times the src is repeated * along the horizontal axis. * @param dst output array of the same type as src. * * @see org.opencv.core.Core.repeat * @see org.opencv.core.Core#reduce */ public static void repeat(Mat src, int ny, int nx, Mat dst) { repeat_0(src.nativeObj, ny, nx, dst.nativeObj); return; } // // C++: void scaleAdd(Mat src1, double alpha, Mat src2, Mat& dst) // /** *

Calculates the sum of a scaled array and another array.

* *

The function scaleAdd is one of the classical primitive linear * algebra operations, known as DAXPY or SAXPY in BLAS * (http://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms). It * calculates the sum of a scaled array and another array:

* *

dst(I)= scale * src1(I) + src2(I)<BR>The function can also be * emulated with a matrix expression, for example: <BR><code>

* *

// C++ code:

* *

Mat A(3, 3, CV_64F);...

* *

A.row(0) = A.row(1)*2 + A.row(2);

* * @param src1 first input array. * @param alpha a alpha * @param src2 second input array of the same size and type as src1. * @param dst output array of the same size and type as src1. * * @see org.opencv.core.Core.scaleAdd * @see org.opencv.core.Mat#dot * @see org.opencv.core.Mat#convertTo * @see org.opencv.core.Core#addWeighted * @see org.opencv.core.Core#add * @see org.opencv.core.Core#subtract */ public static void scaleAdd(Mat src1, double alpha, Mat src2, Mat dst) { scaleAdd_0(src1.nativeObj, alpha, src2.nativeObj, dst.nativeObj); return; } // // C++: void setErrorVerbosity(bool verbose) // public static void setErrorVerbosity(boolean verbose) { setErrorVerbosity_0(verbose); return; } // // C++: void setIdentity(Mat& mtx, Scalar s = Scalar(1)) // /** *

Initializes a scaled identity matrix.

* *

The function "setIdentity" initializes a scaled identity matrix:

* *

mtx(i,j)= value if i=j; 0 otherwise<BR>The function can also be * emulated using the matrix initializers and the matrix expressions: * <BR><code>

* *

// C++ code:

* *

Mat A = Mat.eye(4, 3, CV_32F)*5;

* *

// A will be set to [[5, 0, 0], [0, 5, 0], [0, 0, 5], [0, 0, 0]]

* * @param mtx matrix to initialize (not necessarily square). * @param s a s * * @see org.opencv.core.Core.setIdentity * @see org.opencv.core.Mat#setTo * @see org.opencv.core.Mat#ones * @see org.opencv.core.Mat#zeros */ public static void setIdentity(Mat mtx, Scalar s) { setIdentity_0(mtx.nativeObj, s.val[0], s.val[1], s.val[2], s.val[3]); return; } /** *

Initializes a scaled identity matrix.

* *

The function "setIdentity" initializes a scaled identity matrix:

* *

mtx(i,j)= value if i=j; 0 otherwise<BR>The function can also be * emulated using the matrix initializers and the matrix expressions: * <BR><code>

* *

// C++ code:

* *

Mat A = Mat.eye(4, 3, CV_32F)*5;

* *

// A will be set to [[5, 0, 0], [0, 5, 0], [0, 0, 5], [0, 0, 0]]

* * @param mtx matrix to initialize (not necessarily square). * * @see org.opencv.core.Core.setIdentity * @see org.opencv.core.Mat#setTo * @see org.opencv.core.Mat#ones * @see org.opencv.core.Mat#zeros */ public static void setIdentity(Mat mtx) { setIdentity_1(mtx.nativeObj); return; } // // C++: bool solve(Mat src1, Mat src2, Mat& dst, int flags = DECOMP_LU) // /** *

Solves one or more linear systems or least-squares problems.

* *

The function solve solves a linear system or least-squares * problem (the latter is possible with SVD or QR methods, or by specifying the * flag DECOMP_NORMAL):

* *

dst = arg min _X|src1 * X - src2|

* *

If DECOMP_LU or DECOMP_CHOLESKY method is used, the * function returns 1 if src1 (or src1^Tsrc1) is * non-singular. Otherwise, it returns 0. In the latter case, dst * is not valid. Other methods find a pseudo-solution in case of a singular * left-hand side part.

* *

Note: If you want to find a unity-norm solution of an under-defined singular * system src1*dst=0, the function solve will not do the * work. Use "SVD.solveZ" instead.

* * @param src1 input matrix on the left-hand side of the system. * @param src2 input matrix on the right-hand side of the system. * @param dst output solution. * @param flags solution (matrix inversion) method. *
    *
  • DECOMP_LU Gaussian elimination with optimal pivot element chosen. *
  • DECOMP_CHOLESKY Cholesky LL^T factorization; the matrix * src1 must be symmetrical and positively defined. *
  • DECOMP_EIG eigenvalue decomposition; the matrix src1 must * be symmetrical. *
  • DECOMP_SVD singular value decomposition (SVD) method; the system can * be over-defined and/or the matrix src1 can be singular. *
  • DECOMP_QR QR factorization; the system can be over-defined and/or the * matrix src1 can be singular. *
  • DECOMP_NORMAL while all the previous flags are mutually exclusive, * this flag can be used together with any of the previous; it means that the * normal equations src1^T*src1*dst=src1^Tsrc2 are solved instead of * the original system src1*dst=src2. *
* * @see org.opencv.core.Core.solve * @see org.opencv.core.Core#invert * @see org.opencv.core.Core#eigen */ public static boolean solve(Mat src1, Mat src2, Mat dst, int flags) { boolean retVal = solve_0(src1.nativeObj, src2.nativeObj, dst.nativeObj, flags); return retVal; } /** *

Solves one or more linear systems or least-squares problems.

* *

The function solve solves a linear system or least-squares * problem (the latter is possible with SVD or QR methods, or by specifying the * flag DECOMP_NORMAL):

* *

dst = arg min _X|src1 * X - src2|

* *

If DECOMP_LU or DECOMP_CHOLESKY method is used, the * function returns 1 if src1 (or src1^Tsrc1) is * non-singular. Otherwise, it returns 0. In the latter case, dst * is not valid. Other methods find a pseudo-solution in case of a singular * left-hand side part.

* *

Note: If you want to find a unity-norm solution of an under-defined singular * system src1*dst=0, the function solve will not do the * work. Use "SVD.solveZ" instead.

* * @param src1 input matrix on the left-hand side of the system. * @param src2 input matrix on the right-hand side of the system. * @param dst output solution. * * @see org.opencv.core.Core.solve * @see org.opencv.core.Core#invert * @see org.opencv.core.Core#eigen */ public static boolean solve(Mat src1, Mat src2, Mat dst) { boolean retVal = solve_1(src1.nativeObj, src2.nativeObj, dst.nativeObj); return retVal; } // // C++: int solveCubic(Mat coeffs, Mat& roots) // /** *

Finds the real roots of a cubic equation.

* *

The function solveCubic finds the real roots of a cubic * equation:

*
    *
  • if coeffs is a 4-element vector: *
* *

coeffs [0] x^3 + coeffs [1] x^2 + coeffs [2] x + coeffs [3] = 0

* *
    *
  • if coeffs is a 3-element vector: *
* *

x^3 + coeffs [0] x^2 + coeffs [1] x + coeffs [2] = 0

* *

The roots are stored in the roots array.

* * @param coeffs equation coefficients, an array of 3 or 4 elements. * @param roots output array of real roots that has 1 or 3 elements. * * @see org.opencv.core.Core.solveCubic */ public static int solveCubic(Mat coeffs, Mat roots) { int retVal = solveCubic_0(coeffs.nativeObj, roots.nativeObj); return retVal; } // // C++: double solvePoly(Mat coeffs, Mat& roots, int maxIters = 300) // /** *

Finds the real or complex roots of a polynomial equation.

* *

The function solvePoly finds real and complex roots of a * polynomial equation:

* *

coeffs [n] x^(n) + coeffs [n-1] x^(n-1) +... + coeffs [1] x + coeffs [0] * = 0

* * @param coeffs array of polynomial coefficients. * @param roots output (complex) array of roots. * @param maxIters maximum number of iterations the algorithm does. * * @see org.opencv.core.Core.solvePoly */ public static double solvePoly(Mat coeffs, Mat roots, int maxIters) { double retVal = solvePoly_0(coeffs.nativeObj, roots.nativeObj, maxIters); return retVal; } /** *

Finds the real or complex roots of a polynomial equation.

* *

The function solvePoly finds real and complex roots of a * polynomial equation:

* *

coeffs [n] x^(n) + coeffs [n-1] x^(n-1) +... + coeffs [1] x + coeffs [0] * = 0

* * @param coeffs array of polynomial coefficients. * @param roots output (complex) array of roots. * * @see org.opencv.core.Core.solvePoly */ public static double solvePoly(Mat coeffs, Mat roots) { double retVal = solvePoly_1(coeffs.nativeObj, roots.nativeObj); return retVal; } // // C++: void sort(Mat src, Mat& dst, int flags) // /** *

Sorts each row or each column of a matrix.

* *

The function sort sorts each matrix row or each matrix column in * ascending or descending order. So you should pass two operation flags to get * desired behaviour. If you want to sort matrix rows or columns * lexicographically, you can use STL std.sort generic function * with the proper comparison predicate.

* * @param src input single-channel array. * @param dst output array of the same size and type as src. * @param flags operation flags, a combination of the following values: *
    *
  • CV_SORT_EVERY_ROW each matrix row is sorted independently. *
  • CV_SORT_EVERY_COLUMN each matrix column is sorted independently; this * flag and the previous one are mutually exclusive. *
  • CV_SORT_ASCENDING each matrix row is sorted in the ascending order. *
  • CV_SORT_DESCENDING each matrix row is sorted in the descending order; * this flag and the previous one are also mutually exclusive. *
* * @see org.opencv.core.Core.sort * @see org.opencv.core.Core#randShuffle * @see org.opencv.core.Core#sortIdx */ public static void sort(Mat src, Mat dst, int flags) { sort_0(src.nativeObj, dst.nativeObj, flags); return; } // // C++: void sortIdx(Mat src, Mat& dst, int flags) // /** *

Sorts each row or each column of a matrix.

* *

The function sortIdx sorts each matrix row or each matrix column * in the ascending or descending order. So you should pass two operation flags * to get desired behaviour. Instead of reordering the elements themselves, it * stores the indices of sorted elements in the output array. For example: *

* *

// C++ code:

* *

Mat A = Mat.eye(3,3,CV_32F), B;

* *

sortIdx(A, B, CV_SORT_EVERY_ROW + CV_SORT_ASCENDING);

* *

// B will probably contain

* *

// (because of equal elements in A some permutations are possible):

* *

// [[1, 2, 0], [0, 2, 1], [0, 1, 2]]

* * @param src input single-channel array. * @param dst output integer array of the same size as src. * @param flags operation flags that could be a combination of the following * values: *
    *
  • CV_SORT_EVERY_ROW each matrix row is sorted independently. *
  • CV_SORT_EVERY_COLUMN each matrix column is sorted independently; this * flag and the previous one are mutually exclusive. *
  • CV_SORT_ASCENDING each matrix row is sorted in the ascending order. *
  • CV_SORT_DESCENDING each matrix row is sorted in the descending order; * his flag and the previous one are also mutually exclusive. *
* * @see org.opencv.core.Core.sortIdx * @see org.opencv.core.Core#sort * @see org.opencv.core.Core#randShuffle */ public static void sortIdx(Mat src, Mat dst, int flags) { sortIdx_0(src.nativeObj, dst.nativeObj, flags); return; } // // C++: void split(Mat m, vector_Mat& mv) // /** *

Divides a multi-channel array into several single-channel arrays.

* *

The functions split split a multi-channel array into separate * single-channel arrays:

* *

mv [c](I) = src(I)_c

* *

If you need to extract a single channel or do some other sophisticated * channel permutation, use "mixChannels".

* * @param m a m * @param mv output array or vector of arrays; in the first variant of the * function the number of arrays must match src.channels(); the * arrays themselves are reallocated, if needed. * * @see org.opencv.core.Core.split * @see org.opencv.core.Core#merge * @see org.opencv.imgproc.Imgproc#cvtColor * @see org.opencv.core.Core#mixChannels */ public static void split(Mat m, List mv) { Mat mv_mat = new Mat(); split_0(m.nativeObj, mv_mat.nativeObj); Converters.Mat_to_vector_Mat(mv_mat, mv); return; } // // C++: void sqrt(Mat src, Mat& dst) // /** *

Calculates a square root of array elements.

* *

The functions sqrt calculate a square root of each input array * element. In case of multi-channel arrays, each channel is processed * independently. The accuracy is approximately the same as of the built-in * std.sqrt.

* * @param src input floating-point array. * @param dst output array of the same size and type as src. * * @see org.opencv.core.Core.sqrt * @see org.opencv.core.Core#pow * @see org.opencv.core.Core#magnitude */ public static void sqrt(Mat src, Mat dst) { sqrt_0(src.nativeObj, dst.nativeObj); return; } // // C++: void subtract(Mat src1, Mat src2, Mat& dst, Mat mask = Mat(), int dtype = -1) // /** *

Calculates the per-element difference between two arrays or array and a * scalar.

* *

The function subtract calculates:

*
    *
  • Difference between two arrays, when both input arrays have the same * size and the same number of channels: *
* *

dst(I) = saturate(src1(I) - src2(I)) if mask(I) != 0

* *
    *
  • Difference between an array and a scalar, when src2 is * constructed from Scalar or has the same number of elements as * src1.channels(): *
* *

dst(I) = saturate(src1(I) - src2) if mask(I) != 0

* *
    *
  • Difference between a scalar and an array, when src1 is * constructed from Scalar or has the same number of elements as * src2.channels(): *
* *

dst(I) = saturate(src1 - src2(I)) if mask(I) != 0

* *
    *
  • The reverse difference between a scalar and an array in the case of * SubRS: *
* *

dst(I) = saturate(src2 - src1(I)) if mask(I) != 0

* *

where I is a multi-dimensional index of array elements. In case * of multi-channel arrays, each channel is processed independently. * The first function in the list above can be replaced with matrix expressions: *

* *

// C++ code:

* *

dst = src1 - src2;

* *

dst -= src1; // equivalent to subtract(dst, src1, dst);

* *

The input arrays and the output array can all have the same or different * depths. For example, you can subtract to 8-bit unsigned arrays and store the * difference in a 16-bit signed array. Depth of the output array is determined * by dtype parameter. In the second and third cases above, as well * as in the first case, when src1.depth() == src2.depth(), * dtype can be set to the default -1. In this case * the output array will have the same depth as the input array, be it * src1, src2 or both. *

* *

Note: Saturation is not applied when the output array has the depth * CV_32S. You may even get result of an incorrect sign in the case * of overflow.

* * @param src1 first input array or a scalar. * @param src2 second input array or a scalar. * @param dst output array of the same size and the same number of channels as * the input array. * @param mask optional operation mask; this is an 8-bit single channel array * that specifies elements of the output array to be changed. * @param dtype optional depth of the output array (see the details below). * * @see org.opencv.core.Core.subtract * @see org.opencv.core.Core#addWeighted * @see org.opencv.core.Core#add * @see org.opencv.core.Core#scaleAdd * @see org.opencv.core.Mat#convertTo */ public static void subtract(Mat src1, Mat src2, Mat dst, Mat mask, int dtype) { subtract_0(src1.nativeObj, src2.nativeObj, dst.nativeObj, mask.nativeObj, dtype); return; } /** *

Calculates the per-element difference between two arrays or array and a * scalar.

* *

The function subtract calculates:

*
    *
  • Difference between two arrays, when both input arrays have the same * size and the same number of channels: *
* *

dst(I) = saturate(src1(I) - src2(I)) if mask(I) != 0

* *
    *
  • Difference between an array and a scalar, when src2 is * constructed from Scalar or has the same number of elements as * src1.channels(): *
* *

dst(I) = saturate(src1(I) - src2) if mask(I) != 0

* *
    *
  • Difference between a scalar and an array, when src1 is * constructed from Scalar or has the same number of elements as * src2.channels(): *
* *

dst(I) = saturate(src1 - src2(I)) if mask(I) != 0

* *
    *
  • The reverse difference between a scalar and an array in the case of * SubRS: *
* *

dst(I) = saturate(src2 - src1(I)) if mask(I) != 0

* *

where I is a multi-dimensional index of array elements. In case * of multi-channel arrays, each channel is processed independently. * The first function in the list above can be replaced with matrix expressions: *

* *

// C++ code:

* *

dst = src1 - src2;

* *

dst -= src1; // equivalent to subtract(dst, src1, dst);

* *

The input arrays and the output array can all have the same or different * depths. For example, you can subtract to 8-bit unsigned arrays and store the * difference in a 16-bit signed array. Depth of the output array is determined * by dtype parameter. In the second and third cases above, as well * as in the first case, when src1.depth() == src2.depth(), * dtype can be set to the default -1. In this case * the output array will have the same depth as the input array, be it * src1, src2 or both. *

* *

Note: Saturation is not applied when the output array has the depth * CV_32S. You may even get result of an incorrect sign in the case * of overflow.

* * @param src1 first input array or a scalar. * @param src2 second input array or a scalar. * @param dst output array of the same size and the same number of channels as * the input array. * @param mask optional operation mask; this is an 8-bit single channel array * that specifies elements of the output array to be changed. * * @see org.opencv.core.Core.subtract * @see org.opencv.core.Core#addWeighted * @see org.opencv.core.Core#add * @see org.opencv.core.Core#scaleAdd * @see org.opencv.core.Mat#convertTo */ public static void subtract(Mat src1, Mat src2, Mat dst, Mat mask) { subtract_1(src1.nativeObj, src2.nativeObj, dst.nativeObj, mask.nativeObj); return; } /** *

Calculates the per-element difference between two arrays or array and a * scalar.

* *

The function subtract calculates:

*
    *
  • Difference between two arrays, when both input arrays have the same * size and the same number of channels: *
* *

dst(I) = saturate(src1(I) - src2(I)) if mask(I) != 0

* *
    *
  • Difference between an array and a scalar, when src2 is * constructed from Scalar or has the same number of elements as * src1.channels(): *
* *

dst(I) = saturate(src1(I) - src2) if mask(I) != 0

* *
    *
  • Difference between a scalar and an array, when src1 is * constructed from Scalar or has the same number of elements as * src2.channels(): *
* *

dst(I) = saturate(src1 - src2(I)) if mask(I) != 0

* *
    *
  • The reverse difference between a scalar and an array in the case of * SubRS: *
* *

dst(I) = saturate(src2 - src1(I)) if mask(I) != 0

* *

where I is a multi-dimensional index of array elements. In case * of multi-channel arrays, each channel is processed independently. * The first function in the list above can be replaced with matrix expressions: *

* *

// C++ code:

* *

dst = src1 - src2;

* *

dst -= src1; // equivalent to subtract(dst, src1, dst);

* *

The input arrays and the output array can all have the same or different * depths. For example, you can subtract to 8-bit unsigned arrays and store the * difference in a 16-bit signed array. Depth of the output array is determined * by dtype parameter. In the second and third cases above, as well * as in the first case, when src1.depth() == src2.depth(), * dtype can be set to the default -1. In this case * the output array will have the same depth as the input array, be it * src1, src2 or both. *

* *

Note: Saturation is not applied when the output array has the depth * CV_32S. You may even get result of an incorrect sign in the case * of overflow.

* * @param src1 first input array or a scalar. * @param src2 second input array or a scalar. * @param dst output array of the same size and the same number of channels as * the input array. * * @see org.opencv.core.Core.subtract * @see org.opencv.core.Core#addWeighted * @see org.opencv.core.Core#add * @see org.opencv.core.Core#scaleAdd * @see org.opencv.core.Mat#convertTo */ public static void subtract(Mat src1, Mat src2, Mat dst) { subtract_2(src1.nativeObj, src2.nativeObj, dst.nativeObj); return; } // // C++: void subtract(Mat src1, Scalar src2, Mat& dst, Mat mask = Mat(), int dtype = -1) // /** *

Calculates the per-element difference between two arrays or array and a * scalar.

* *

The function subtract calculates:

*
    *
  • Difference between two arrays, when both input arrays have the same * size and the same number of channels: *
* *

dst(I) = saturate(src1(I) - src2(I)) if mask(I) != 0

* *
    *
  • Difference between an array and a scalar, when src2 is * constructed from Scalar or has the same number of elements as * src1.channels(): *
* *

dst(I) = saturate(src1(I) - src2) if mask(I) != 0

* *
    *
  • Difference between a scalar and an array, when src1 is * constructed from Scalar or has the same number of elements as * src2.channels(): *
* *

dst(I) = saturate(src1 - src2(I)) if mask(I) != 0

* *
    *
  • The reverse difference between a scalar and an array in the case of * SubRS: *
* *

dst(I) = saturate(src2 - src1(I)) if mask(I) != 0

* *

where I is a multi-dimensional index of array elements. In case * of multi-channel arrays, each channel is processed independently. * The first function in the list above can be replaced with matrix expressions: *

* *

// C++ code:

* *

dst = src1 - src2;

* *

dst -= src1; // equivalent to subtract(dst, src1, dst);

* *

The input arrays and the output array can all have the same or different * depths. For example, you can subtract to 8-bit unsigned arrays and store the * difference in a 16-bit signed array. Depth of the output array is determined * by dtype parameter. In the second and third cases above, as well * as in the first case, when src1.depth() == src2.depth(), * dtype can be set to the default -1. In this case * the output array will have the same depth as the input array, be it * src1, src2 or both. *

* *

Note: Saturation is not applied when the output array has the depth * CV_32S. You may even get result of an incorrect sign in the case * of overflow.

* * @param src1 first input array or a scalar. * @param src2 second input array or a scalar. * @param dst output array of the same size and the same number of channels as * the input array. * @param mask optional operation mask; this is an 8-bit single channel array * that specifies elements of the output array to be changed. * @param dtype optional depth of the output array (see the details below). * * @see org.opencv.core.Core.subtract * @see org.opencv.core.Core#addWeighted * @see org.opencv.core.Core#add * @see org.opencv.core.Core#scaleAdd * @see org.opencv.core.Mat#convertTo */ public static void subtract(Mat src1, Scalar src2, Mat dst, Mat mask, int dtype) { subtract_3(src1.nativeObj, src2.val[0], src2.val[1], src2.val[2], src2.val[3], dst.nativeObj, mask.nativeObj, dtype); return; } /** *

Calculates the per-element difference between two arrays or array and a * scalar.

* *

The function subtract calculates:

*
    *
  • Difference between two arrays, when both input arrays have the same * size and the same number of channels: *
* *

dst(I) = saturate(src1(I) - src2(I)) if mask(I) != 0

* *
    *
  • Difference between an array and a scalar, when src2 is * constructed from Scalar or has the same number of elements as * src1.channels(): *
* *

dst(I) = saturate(src1(I) - src2) if mask(I) != 0

* *
    *
  • Difference between a scalar and an array, when src1 is * constructed from Scalar or has the same number of elements as * src2.channels(): *
* *

dst(I) = saturate(src1 - src2(I)) if mask(I) != 0

* *
    *
  • The reverse difference between a scalar and an array in the case of * SubRS: *
* *

dst(I) = saturate(src2 - src1(I)) if mask(I) != 0

* *

where I is a multi-dimensional index of array elements. In case * of multi-channel arrays, each channel is processed independently. * The first function in the list above can be replaced with matrix expressions: *

* *

// C++ code:

* *

dst = src1 - src2;

* *

dst -= src1; // equivalent to subtract(dst, src1, dst);

* *

The input arrays and the output array can all have the same or different * depths. For example, you can subtract to 8-bit unsigned arrays and store the * difference in a 16-bit signed array. Depth of the output array is determined * by dtype parameter. In the second and third cases above, as well * as in the first case, when src1.depth() == src2.depth(), * dtype can be set to the default -1. In this case * the output array will have the same depth as the input array, be it * src1, src2 or both. *

* *

Note: Saturation is not applied when the output array has the depth * CV_32S. You may even get result of an incorrect sign in the case * of overflow.

* * @param src1 first input array or a scalar. * @param src2 second input array or a scalar. * @param dst output array of the same size and the same number of channels as * the input array. * @param mask optional operation mask; this is an 8-bit single channel array * that specifies elements of the output array to be changed. * * @see org.opencv.core.Core.subtract * @see org.opencv.core.Core#addWeighted * @see org.opencv.core.Core#add * @see org.opencv.core.Core#scaleAdd * @see org.opencv.core.Mat#convertTo */ public static void subtract(Mat src1, Scalar src2, Mat dst, Mat mask) { subtract_4(src1.nativeObj, src2.val[0], src2.val[1], src2.val[2], src2.val[3], dst.nativeObj, mask.nativeObj); return; } /** *

Calculates the per-element difference between two arrays or array and a * scalar.

* *

The function subtract calculates:

*
    *
  • Difference between two arrays, when both input arrays have the same * size and the same number of channels: *
* *

dst(I) = saturate(src1(I) - src2(I)) if mask(I) != 0

* *
    *
  • Difference between an array and a scalar, when src2 is * constructed from Scalar or has the same number of elements as * src1.channels(): *
* *

dst(I) = saturate(src1(I) - src2) if mask(I) != 0

* *
    *
  • Difference between a scalar and an array, when src1 is * constructed from Scalar or has the same number of elements as * src2.channels(): *
* *

dst(I) = saturate(src1 - src2(I)) if mask(I) != 0

* *
    *
  • The reverse difference between a scalar and an array in the case of * SubRS: *
* *

dst(I) = saturate(src2 - src1(I)) if mask(I) != 0

* *

where I is a multi-dimensional index of array elements. In case * of multi-channel arrays, each channel is processed independently. * The first function in the list above can be replaced with matrix expressions: *

* *

// C++ code:

* *

dst = src1 - src2;

* *

dst -= src1; // equivalent to subtract(dst, src1, dst);

* *

The input arrays and the output array can all have the same or different * depths. For example, you can subtract to 8-bit unsigned arrays and store the * difference in a 16-bit signed array. Depth of the output array is determined * by dtype parameter. In the second and third cases above, as well * as in the first case, when src1.depth() == src2.depth(), * dtype can be set to the default -1. In this case * the output array will have the same depth as the input array, be it * src1, src2 or both. *

* *

Note: Saturation is not applied when the output array has the depth * CV_32S. You may even get result of an incorrect sign in the case * of overflow.

* * @param src1 first input array or a scalar. * @param src2 second input array or a scalar. * @param dst output array of the same size and the same number of channels as * the input array. * * @see org.opencv.core.Core.subtract * @see org.opencv.core.Core#addWeighted * @see org.opencv.core.Core#add * @see org.opencv.core.Core#scaleAdd * @see org.opencv.core.Mat#convertTo */ public static void subtract(Mat src1, Scalar src2, Mat dst) { subtract_5(src1.nativeObj, src2.val[0], src2.val[1], src2.val[2], src2.val[3], dst.nativeObj); return; } // // C++: Scalar sum(Mat src) // /** *

Calculates the sum of array elements.

* *

The functions sum calculate and return the sum of array * elements, independently for each channel.

* * @param src a src * * @see org.opencv.core.Core.sum * @see org.opencv.core.Core#meanStdDev * @see org.opencv.core.Core#reduce * @see org.opencv.core.Core#minMaxLoc * @see org.opencv.core.Core#countNonZero * @see org.opencv.core.Core#norm * @see org.opencv.core.Core#mean */ public static Scalar sumElems(Mat src) { Scalar retVal = new Scalar(sumElems_0(src.nativeObj)); return retVal; } // // C++: Scalar trace(Mat mtx) // /** *

Returns the trace of a matrix.

* *

The function trace returns the sum of the diagonal elements of * the matrix mtx.

* *

tr(mtx) = sum _i mtx(i,i)

* * @param mtx a mtx * * @see org.opencv.core.Core.trace */ public static Scalar trace(Mat mtx) { Scalar retVal = new Scalar(trace_0(mtx.nativeObj)); return retVal; } // // C++: void transform(Mat src, Mat& dst, Mat m) // /** *

Performs the matrix transformation of every array element.

* *

The function transform performs the matrix transformation of * every element of the array src and stores the results in * dst :

* *

dst(I) = m * src(I)

* *

(when m.cols=src.channels()), or

* *

dst(I) = m * [ src(I); 1]

* *

(when m.cols=src.channels()+1)

* *

Every element of the N -channel array src is * interpreted as N -element vector that is transformed using the * M x N or M x (N+1) matrix m to * M-element vector - the corresponding element of the output array * dst.

* *

The function may be used for geometrical transformation of N * -dimensional points, arbitrary linear color space transformation (such as * various kinds of RGB to YUV transforms), shuffling the image channels, and so * forth.

* * @param src input array that must have as many channels (1 to 4) as * m.cols or m.cols-1. * @param dst output array of the same size and depth as src; it * has as many channels as m.rows. * @param m transformation 2x2 or 2x3 floating-point * matrix. * * @see org.opencv.core.Core.transform * @see org.opencv.imgproc.Imgproc#warpAffine * @see org.opencv.core.Core#perspectiveTransform * @see org.opencv.imgproc.Imgproc#warpPerspective * @see org.opencv.imgproc.Imgproc#getAffineTransform */ public static void transform(Mat src, Mat dst, Mat m) { transform_0(src.nativeObj, dst.nativeObj, m.nativeObj); return; } // // C++: void transpose(Mat src, Mat& dst) // /** *

Transposes a matrix.

* *

The function "transpose" transposes the matrix src :

* *

dst(i,j) = src(j,i)

* *

Note: No complex conjugation is done in case of a complex matrix. It it * should be done separately if needed.

* * @param src input array. * @param dst output array of the same type as src. * * @see org.opencv.core.Core.transpose */ public static void transpose(Mat src, Mat dst) { transpose_0(src.nativeObj, dst.nativeObj); return; } // // C++: void vconcat(vector_Mat src, Mat& dst) // public static void vconcat(List src, Mat dst) { Mat src_mat = Converters.vector_Mat_to_Mat(src); vconcat_0(src_mat.nativeObj, dst.nativeObj); return; } // manual port public static class MinMaxLocResult { public double minVal; public double maxVal; public Point minLoc; public Point maxLoc; public MinMaxLocResult() { minVal=0; maxVal=0; minLoc=new Point(); maxLoc=new Point(); } } // C++: minMaxLoc(Mat src, double* minVal, double* maxVal=0, Point* minLoc=0, Point* maxLoc=0, InputArray mask=noArray()) /** *

Finds the global minimum and maximum in an array.

* *

The functions minMaxLoc find the minimum and maximum element * values and their positions. The extremums are searched across the whole array * or, if mask is not an empty array, in the specified array * region.

* *

The functions do not work with multi-channel arrays. If you need to find * minimum or maximum elements across all the channels, use "Mat.reshape" first * to reinterpret the array as single-channel. Or you may extract the particular * channel using either "extractImageCOI", or "mixChannels", or "split".

* * @param src input single-channel array. * @param mask optional mask used to select a sub-array. * * @see org.opencv.core.Core.minMaxLoc * @see org.opencv.core.Core#compare * @see org.opencv.core.Core#min * @see org.opencv.core.Core#mixChannels * @see org.opencv.core.Mat#reshape * @see org.opencv.core.Core#split * @see org.opencv.core.Core#max * @see org.opencv.core.Core#inRange */ public static MinMaxLocResult minMaxLoc(Mat src, Mat mask) { MinMaxLocResult res = new MinMaxLocResult(); long maskNativeObj=0; if (mask != null) { maskNativeObj=mask.nativeObj; } double resarr[] = n_minMaxLocManual(src.nativeObj, maskNativeObj); res.minVal=resarr[0]; res.maxVal=resarr[1]; res.minLoc.x=resarr[2]; res.minLoc.y=resarr[3]; res.maxLoc.x=resarr[4]; res.maxLoc.y=resarr[5]; return res; } /** *

Finds the global minimum and maximum in an array.

* *

The functions minMaxLoc find the minimum and maximum element * values and their positions. The extremums are searched across the whole array * or, if mask is not an empty array, in the specified array * region.

* *

The functions do not work with multi-channel arrays. If you need to find * minimum or maximum elements across all the channels, use "Mat.reshape" first * to reinterpret the array as single-channel. Or you may extract the particular * channel using either "extractImageCOI", or "mixChannels", or "split".

* * @param src input single-channel array. * * @see org.opencv.core.Core.minMaxLoc * @see org.opencv.core.Core#compare * @see org.opencv.core.Core#min * @see org.opencv.core.Core#mixChannels * @see org.opencv.core.Mat#reshape * @see org.opencv.core.Core#split * @see org.opencv.core.Core#max * @see org.opencv.core.Core#inRange */ public static MinMaxLocResult minMaxLoc(Mat src) { return minMaxLoc(src, null); } // C++: Size getTextSize(const string& text, int fontFace, double fontScale, int thickness, int* baseLine); /** *

Calculates the width and height of a text string.

* *

The function getTextSize calculates and returns the size of a * box that contains the specified text.That is, the following code renders some * text, the tight box surrounding it, and the baseline:

* *

// C++ code:

* *

string text = "Funny text inside the box";

* *

int fontFace = FONT_HERSHEY_SCRIPT_SIMPLEX;

* *

double fontScale = 2;

* *

int thickness = 3;

* *

Mat img(600, 800, CV_8UC3, Scalar.all(0));

* *

int baseline=0;

* *

Size textSize = getTextSize(text, fontFace,

* *

fontScale, thickness, &baseline);

* *

baseline += thickness;

* *

// center the text

* *

Point textOrg((img.cols - textSize.width)/2,

* *

(img.rows + textSize.height)/2);

* *

// draw the box

* *

rectangle(img, textOrg + Point(0, baseline),

* *

textOrg + Point(textSize.width, -textSize.height),

* *

Scalar(0,0,255));

* *

//... and the baseline first

* *

line(img, textOrg + Point(0, thickness),

* *

textOrg + Point(textSize.width, thickness),

* *

Scalar(0, 0, 255));

* *

// then put the text itself

* *

putText(img, text, textOrg, fontFace, fontScale,

* *

Scalar.all(255), thickness, 8);

* * @param text Input text string. * @param fontFace Font to use. See the "putText" for details. * @param fontScale Font scale. See the "putText" for details. * @param thickness Thickness of lines used to render the text. See "putText" * for details. * @param baseLine Output parameter - y-coordinate of the baseline relative to * the bottom-most text point. * * @see org.opencv.core.Core.getTextSize */ public static Size getTextSize(String text, int fontFace, double fontScale, int thickness, int[] baseLine) { if(baseLine != null && baseLine.length != 1) throw new java.lang.IllegalArgumentException("'baseLine' must be 'int[1]' or 'null'."); Size retVal = new Size(n_getTextSize(text, fontFace, fontScale, thickness, baseLine)); return retVal; } // C++: void LUT(Mat src, Mat lut, Mat& dst, int interpolation = 0) private static native void LUT_0(long src_nativeObj, long lut_nativeObj, long dst_nativeObj, int interpolation); private static native void LUT_1(long src_nativeObj, long lut_nativeObj, long dst_nativeObj); // C++: double Mahalanobis(Mat v1, Mat v2, Mat icovar) private static native double Mahalanobis_0(long v1_nativeObj, long v2_nativeObj, long icovar_nativeObj); // C++: void PCABackProject(Mat data, Mat mean, Mat eigenvectors, Mat& result) private static native void PCABackProject_0(long data_nativeObj, long mean_nativeObj, long eigenvectors_nativeObj, long result_nativeObj); // C++: void PCACompute(Mat data, Mat& mean, Mat& eigenvectors, int maxComponents = 0) private static native void PCACompute_0(long data_nativeObj, long mean_nativeObj, long eigenvectors_nativeObj, int maxComponents); private static native void PCACompute_1(long data_nativeObj, long mean_nativeObj, long eigenvectors_nativeObj); // C++: void PCAComputeVar(Mat data, Mat& mean, Mat& eigenvectors, double retainedVariance) private static native void PCAComputeVar_0(long data_nativeObj, long mean_nativeObj, long eigenvectors_nativeObj, double retainedVariance); // C++: void PCAProject(Mat data, 
Mat mean, Mat eigenvectors, Mat& result) private static native void PCAProject_0(long data_nativeObj, long mean_nativeObj, long eigenvectors_nativeObj, long result_nativeObj); // C++: void SVBackSubst(Mat w, Mat u, Mat vt, Mat rhs, Mat& dst) private static native void SVBackSubst_0(long w_nativeObj, long u_nativeObj, long vt_nativeObj, long rhs_nativeObj, long dst_nativeObj); // C++: void SVDecomp(Mat src, Mat& w, Mat& u, Mat& vt, int flags = 0) private static native void SVDecomp_0(long src_nativeObj, long w_nativeObj, long u_nativeObj, long vt_nativeObj, int flags); private static native void SVDecomp_1(long src_nativeObj, long w_nativeObj, long u_nativeObj, long vt_nativeObj); // C++: void absdiff(Mat src1, Mat src2, Mat& dst) private static native void absdiff_0(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj); // C++: void absdiff(Mat src1, Scalar src2, Mat& dst) private static native void absdiff_1(long src1_nativeObj, double src2_val0, double src2_val1, double src2_val2, double src2_val3, long dst_nativeObj); // C++: void add(Mat src1, Mat src2, Mat& dst, Mat mask = Mat(), int dtype = -1) private static native void add_0(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj, long mask_nativeObj, int dtype); private static native void add_1(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj, long mask_nativeObj); private static native void add_2(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj); // C++: void add(Mat src1, Scalar src2, Mat& dst, Mat mask = Mat(), int dtype = -1) private static native void add_3(long src1_nativeObj, double src2_val0, double src2_val1, double src2_val2, double src2_val3, long dst_nativeObj, long mask_nativeObj, int dtype); private static native void add_4(long src1_nativeObj, double src2_val0, double src2_val1, double src2_val2, double src2_val3, long dst_nativeObj, long mask_nativeObj); private static native void add_5(long src1_nativeObj, double src2_val0, double src2_val1, double 
    src2_val2, double src2_val3, long dst_nativeObj); // NOTE(review): tail of a declaration whose start lies above this chunk — left untouched.

    //
    // JNI stubs for the OpenCV 2.4 Core module (auto-generated by the OpenCV
    // Java wrapper generator — do not edit signatures by hand; each one must
    // match a registered C++ entry point in opencv_java248).
    //
    // Conventions visible throughout this block:
    //  * cv::Mat arguments are passed as their native pointer in a long
    //    ("..._nativeObj"); output Mats are plain longs too — the C++ side
    //    writes through the handle.
    //  * cv::Scalar is flattened into four doubles (val0..val3); cv::Point
    //    into (x, y) doubles; cv::Size into (width, height); cv::TermCriteria
    //    into (type, maxCount, epsilon); cv::RotatedRect into center/size/angle.
    //  * Each C++ function with default arguments becomes several overloads
    //    suffixed _0, _1, ...: _0 takes every parameter, higher suffixes drop
    //    trailing defaulted parameters.
    //  * The "// C++:" comment above each group is the original C++ prototype.
    //

    // C++: void addWeighted(Mat src1, double alpha, Mat src2, double beta, double gamma, Mat& dst, int dtype = -1)
    private static native void addWeighted_0(long src1_nativeObj, double alpha, long src2_nativeObj, double beta, double gamma, long dst_nativeObj, int dtype);
    private static native void addWeighted_1(long src1_nativeObj, double alpha, long src2_nativeObj, double beta, double gamma, long dst_nativeObj);

    // C++: void batchDistance(Mat src1, Mat src2, Mat& dist, int dtype, Mat& nidx, int normType = NORM_L2, int K = 0, Mat mask = Mat(), int update = 0, bool crosscheck = false)
    private static native void batchDistance_0(long src1_nativeObj, long src2_nativeObj, long dist_nativeObj, int dtype, long nidx_nativeObj, int normType, int K, long mask_nativeObj, int update, boolean crosscheck);
    private static native void batchDistance_1(long src1_nativeObj, long src2_nativeObj, long dist_nativeObj, int dtype, long nidx_nativeObj, int normType, int K);
    private static native void batchDistance_2(long src1_nativeObj, long src2_nativeObj, long dist_nativeObj, int dtype, long nidx_nativeObj);

    // C++: void bitwise_and(Mat src1, Mat src2, Mat& dst, Mat mask = Mat())
    private static native void bitwise_and_0(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj, long mask_nativeObj);
    private static native void bitwise_and_1(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj);

    // C++: void bitwise_not(Mat src, Mat& dst, Mat mask = Mat())
    private static native void bitwise_not_0(long src_nativeObj, long dst_nativeObj, long mask_nativeObj);
    private static native void bitwise_not_1(long src_nativeObj, long dst_nativeObj);

    // C++: void bitwise_or(Mat src1, Mat src2, Mat& dst, Mat mask = Mat())
    private static native void bitwise_or_0(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj, long mask_nativeObj);
    private static native void bitwise_or_1(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj);

    // C++: void bitwise_xor(Mat src1, Mat src2, Mat& dst, Mat mask = Mat())
    private static native void bitwise_xor_0(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj, long mask_nativeObj);
    private static native void bitwise_xor_1(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj);

    // C++: void calcCovarMatrix(Mat samples, Mat& covar, Mat& mean, int flags, int ctype = CV_64F)
    private static native void calcCovarMatrix_0(long samples_nativeObj, long covar_nativeObj, long mean_nativeObj, int flags, int ctype);
    private static native void calcCovarMatrix_1(long samples_nativeObj, long covar_nativeObj, long mean_nativeObj, int flags);

    // C++: void cartToPolar(Mat x, Mat y, Mat& magnitude, Mat& angle, bool angleInDegrees = false)
    private static native void cartToPolar_0(long x_nativeObj, long y_nativeObj, long magnitude_nativeObj, long angle_nativeObj, boolean angleInDegrees);
    private static native void cartToPolar_1(long x_nativeObj, long y_nativeObj, long magnitude_nativeObj, long angle_nativeObj);

    // C++: bool checkRange(Mat a, bool quiet = true, _hidden_ * pos = 0, double minVal = -DBL_MAX, double maxVal = DBL_MAX)
    // NOTE: the C++ "pos" out-parameter is not exposed to Java — the binding skips it.
    private static native boolean checkRange_0(long a_nativeObj, boolean quiet, double minVal, double maxVal);
    private static native boolean checkRange_1(long a_nativeObj);

    // C++: void circle(Mat& img, Point center, int radius, Scalar color, int thickness = 1, int lineType = 8, int shift = 0)
    private static native void circle_0(long img_nativeObj, double center_x, double center_y, int radius, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType, int shift);
    private static native void circle_1(long img_nativeObj, double center_x, double center_y, int radius, double color_val0, double color_val1, double color_val2, double color_val3, int thickness);
    private static native void circle_2(long img_nativeObj, double center_x, double center_y, int radius, double color_val0, double color_val1, double color_val2, double color_val3);

    // C++: bool clipLine(Rect imgRect, Point& pt1, Point& pt2)
    // The in/out Points use paired (x, y) inputs plus double[] out-arrays for the clipped values.
    private static native boolean clipLine_0(int imgRect_x, int imgRect_y, int imgRect_width, int imgRect_height, double pt1_x, double pt1_y, double[] pt1_out, double pt2_x, double pt2_y, double[] pt2_out);

    // C++: void compare(Mat src1, Mat src2, Mat& dst, int cmpop)
    private static native void compare_0(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj, int cmpop);

    // C++: void compare(Mat src1, Scalar src2, Mat& dst, int cmpop)
    private static native void compare_1(long src1_nativeObj, double src2_val0, double src2_val1, double src2_val2, double src2_val3, long dst_nativeObj, int cmpop);

    // C++: void completeSymm(Mat& mtx, bool lowerToUpper = false)
    private static native void completeSymm_0(long mtx_nativeObj, boolean lowerToUpper);
    private static native void completeSymm_1(long mtx_nativeObj);

    // C++: void convertScaleAbs(Mat src, Mat& dst, double alpha = 1, double beta = 0)
    private static native void convertScaleAbs_0(long src_nativeObj, long dst_nativeObj, double alpha, double beta);
    private static native void convertScaleAbs_1(long src_nativeObj, long dst_nativeObj);

    // C++: int countNonZero(Mat src)
    private static native int countNonZero_0(long src_nativeObj);

    // C++: float cubeRoot(float val)
    private static native float cubeRoot_0(float val);

    // C++: void dct(Mat src, Mat& dst, int flags = 0)
    private static native void dct_0(long src_nativeObj, long dst_nativeObj, int flags);
    private static native void dct_1(long src_nativeObj, long dst_nativeObj);

    // C++: double determinant(Mat mtx)
    private static native double determinant_0(long mtx_nativeObj);

    // C++: void dft(Mat src, Mat& dst, int flags = 0, int nonzeroRows = 0)
    private static native void dft_0(long src_nativeObj, long dst_nativeObj, int flags, int nonzeroRows);
    private static native void dft_1(long src_nativeObj, long dst_nativeObj);

    // C++: void divide(Mat src1, Mat src2, Mat& dst, double scale = 1, int dtype = -1)
    private static native void divide_0(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj, double scale, int dtype);
    private static native void divide_1(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj, double scale);
    private static native void divide_2(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj);

    // C++: void divide(double scale, Mat src2, Mat& dst, int dtype = -1)
    private static native void divide_3(double scale, long src2_nativeObj, long dst_nativeObj, int dtype);
    private static native void divide_4(double scale, long src2_nativeObj, long dst_nativeObj);

    // C++: void divide(Mat src1, Scalar src2, Mat& dst, double scale = 1, int dtype = -1)
    private static native void divide_5(long src1_nativeObj, double src2_val0, double src2_val1, double src2_val2, double src2_val3, long dst_nativeObj, double scale, int dtype);
    private static native void divide_6(long src1_nativeObj, double src2_val0, double src2_val1, double src2_val2, double src2_val3, long dst_nativeObj, double scale);
    private static native void divide_7(long src1_nativeObj, double src2_val0, double src2_val1, double src2_val2, double src2_val3, long dst_nativeObj);

    // C++: bool eigen(Mat src, bool computeEigenvectors, Mat& eigenvalues, Mat& eigenvectors)
    private static native boolean eigen_0(long src_nativeObj, boolean computeEigenvectors, long eigenvalues_nativeObj, long eigenvectors_nativeObj);

    // C++: void ellipse(Mat& img, Point center, Size axes, double angle, double startAngle, double endAngle, Scalar color, int thickness = 1, int lineType = 8, int shift = 0)
    private static native void ellipse_0(long img_nativeObj, double center_x, double center_y, double axes_width, double axes_height, double angle, double startAngle, double endAngle, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType, int shift);
    private static native void ellipse_1(long img_nativeObj, double center_x, double center_y, double axes_width, double axes_height, double angle, double startAngle, double endAngle, double color_val0, double color_val1, double color_val2, double color_val3, int thickness);
    private static native void ellipse_2(long img_nativeObj, double center_x, double center_y, double axes_width, double axes_height, double angle, double startAngle, double endAngle, double color_val0, double color_val1, double color_val2, double color_val3);

    // C++: void ellipse(Mat& img, RotatedRect box, Scalar color, int thickness = 1, int lineType = 8)
    private static native void ellipse_3(long img_nativeObj, double box_center_x, double box_center_y, double box_size_width, double box_size_height, double box_angle, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType);
    private static native void ellipse_4(long img_nativeObj, double box_center_x, double box_center_y, double box_size_width, double box_size_height, double box_angle, double color_val0, double color_val1, double color_val2, double color_val3, int thickness);
    private static native void ellipse_5(long img_nativeObj, double box_center_x, double box_center_y, double box_size_width, double box_size_height, double box_angle, double color_val0, double color_val1, double color_val2, double color_val3);

    // C++: void ellipse2Poly(Point center, Size axes, int angle, int arcStart, int arcEnd, int delta, vector_Point& pts)
    private static native void ellipse2Poly_0(double center_x, double center_y, double axes_width, double axes_height, int angle, int arcStart, int arcEnd, int delta, long pts_mat_nativeObj);

    // C++: void exp(Mat src, Mat& dst)
    private static native void exp_0(long src_nativeObj, long dst_nativeObj);

    // C++: void extractChannel(Mat src, Mat& dst, int coi)
    private static native void extractChannel_0(long src_nativeObj, long dst_nativeObj, int coi);

    // C++: float fastAtan2(float y, float x)
    private static native float fastAtan2_0(float y, float x);

    // C++: void fillConvexPoly(Mat& img, vector_Point points, Scalar color, int lineType = 8, int shift = 0)
    private static native void fillConvexPoly_0(long img_nativeObj, long points_mat_nativeObj, double color_val0, double color_val1, double color_val2, double color_val3, int lineType, int shift);
    private static native void fillConvexPoly_1(long img_nativeObj, long points_mat_nativeObj, double color_val0, double color_val1, double color_val2, double color_val3);

    // C++: void fillPoly(Mat& img, vector_vector_Point pts, Scalar color, int lineType = 8, int shift = 0, Point offset = Point())
    private static native void fillPoly_0(long img_nativeObj, long pts_mat_nativeObj, double color_val0, double color_val1, double color_val2, double color_val3, int lineType, int shift, double offset_x, double offset_y);
    private static native void fillPoly_1(long img_nativeObj, long pts_mat_nativeObj, double color_val0, double color_val1, double color_val2, double color_val3);

    // C++: void findNonZero(Mat src, Mat& idx)
    private static native void findNonZero_0(long src_nativeObj, long idx_nativeObj);

    // C++: void flip(Mat src, Mat& dst, int flipCode)
    private static native void flip_0(long src_nativeObj, long dst_nativeObj, int flipCode);

    // C++: void gemm(Mat src1, Mat src2, double alpha, Mat src3, double gamma, Mat& dst, int flags = 0)
    private static native void gemm_0(long src1_nativeObj, long src2_nativeObj, double alpha, long src3_nativeObj, double gamma, long dst_nativeObj, int flags);
    private static native void gemm_1(long src1_nativeObj, long src2_nativeObj, double alpha, long src3_nativeObj, double gamma, long dst_nativeObj);

    // C++: string getBuildInformation()
    private static native String getBuildInformation_0();

    // C++: int64 getCPUTickCount()
    private static native long getCPUTickCount_0();

    // C++: int getNumberOfCPUs()
    private static native int getNumberOfCPUs_0();

    // C++: int getOptimalDFTSize(int vecsize)
    private static native int getOptimalDFTSize_0(int vecsize);

    // C++: int64 getTickCount()
    private static native long getTickCount_0();

    // C++: double getTickFrequency()
    private static native double getTickFrequency_0();

    // C++: void hconcat(vector_Mat src, Mat& dst)
    private static native void hconcat_0(long src_mat_nativeObj, long dst_nativeObj);

    // C++: void idct(Mat src, Mat& dst, int flags = 0)
    private static native void idct_0(long src_nativeObj, long dst_nativeObj, int flags);
    private static native void idct_1(long src_nativeObj, long dst_nativeObj);

    // C++: void idft(Mat src, Mat& dst, int flags = 0, int nonzeroRows = 0)
    private static native void idft_0(long src_nativeObj, long dst_nativeObj, int flags, int nonzeroRows);
    private static native void idft_1(long src_nativeObj, long dst_nativeObj);

    // C++: void inRange(Mat src, Scalar lowerb, Scalar upperb, Mat& dst)
    private static native void inRange_0(long src_nativeObj, double lowerb_val0, double lowerb_val1, double lowerb_val2, double lowerb_val3, double upperb_val0, double upperb_val1, double upperb_val2, double upperb_val3, long dst_nativeObj);

    // C++: void insertChannel(Mat src, Mat& dst, int coi)
    private static native void insertChannel_0(long src_nativeObj, long dst_nativeObj, int coi);

    // C++: double invert(Mat src, Mat& dst, int flags = DECOMP_LU)
    private static native double invert_0(long src_nativeObj, long dst_nativeObj, int flags);
    private static native double invert_1(long src_nativeObj, long dst_nativeObj);

    // C++: double kmeans(Mat data, int K, Mat& bestLabels, TermCriteria criteria, int attempts, int flags, Mat& centers = Mat())
    private static native double kmeans_0(long data_nativeObj, int K, long bestLabels_nativeObj, int criteria_type, int criteria_maxCount, double criteria_epsilon, int attempts, int flags, long centers_nativeObj);
    private static native double kmeans_1(long data_nativeObj, int K, long bestLabels_nativeObj, int criteria_type, int criteria_maxCount, double criteria_epsilon, int attempts, int flags);

    // C++: void line(Mat& img, Point pt1, Point pt2, Scalar color, int thickness = 1, int lineType = 8, int shift = 0)
    private static native void line_0(long img_nativeObj, double pt1_x, double pt1_y, double pt2_x, double pt2_y, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType, int shift);
    private static native void line_1(long img_nativeObj, double pt1_x, double pt1_y, double pt2_x, double pt2_y, double color_val0, double color_val1, double color_val2, double color_val3, int thickness);
    private static native void line_2(long img_nativeObj, double pt1_x, double pt1_y, double pt2_x, double pt2_y, double color_val0, double color_val1, double color_val2, double color_val3);

    // C++: void log(Mat src, Mat& dst)
    private static native void log_0(long src_nativeObj, long dst_nativeObj);

    // C++: void magnitude(Mat x, Mat y, Mat& magnitude)
    private static native void magnitude_0(long x_nativeObj, long y_nativeObj, long magnitude_nativeObj);

    // C++: void max(Mat src1, Mat src2, Mat& dst)
    private static native void max_0(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj);

    // C++: void max(Mat src1, Scalar src2, Mat& dst)
    private static native void max_1(long src1_nativeObj, double src2_val0, double src2_val1, double src2_val2, double src2_val3, long dst_nativeObj);

    // C++: Scalar mean(Mat src, Mat mask = Mat())
    // Scalar results come back as a double[] (one element per channel value).
    private static native double[] mean_0(long src_nativeObj, long mask_nativeObj);
    private static native double[] mean_1(long src_nativeObj);

    // C++: void meanStdDev(Mat src, vector_double& mean, vector_double& stddev, Mat mask = Mat())
    private static native void meanStdDev_0(long src_nativeObj, long mean_mat_nativeObj, long stddev_mat_nativeObj, long mask_nativeObj);
    private static native void meanStdDev_1(long src_nativeObj, long mean_mat_nativeObj, long stddev_mat_nativeObj);

    // C++: void merge(vector_Mat mv, Mat& dst)
    private static native void merge_0(long mv_mat_nativeObj, long dst_nativeObj);

    // C++: void min(Mat src1, Mat src2, Mat& dst)
    private static native void min_0(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj);

    // C++: void min(Mat src1, Scalar src2, Mat& dst)
    private static native void min_1(long src1_nativeObj, double src2_val0, double src2_val1, double src2_val2, double src2_val3, long dst_nativeObj);

    // C++: void mixChannels(vector_Mat src, vector_Mat dst, vector_int fromTo)
    private static native void mixChannels_0(long src_mat_nativeObj, long dst_mat_nativeObj, long fromTo_mat_nativeObj);

    // C++: void mulSpectrums(Mat a, Mat b, Mat& c, int flags, bool conjB = false)
    private static native void mulSpectrums_0(long a_nativeObj, long b_nativeObj, long c_nativeObj, int flags, boolean conjB);
    private static native void mulSpectrums_1(long a_nativeObj, long b_nativeObj, long c_nativeObj, int flags);

    // C++: void mulTransposed(Mat src, Mat& dst, bool aTa, Mat delta = Mat(), double scale = 1, int dtype = -1)
    private static native void mulTransposed_0(long src_nativeObj, long dst_nativeObj, boolean aTa, long delta_nativeObj, double scale, int dtype);
    private static native void mulTransposed_1(long src_nativeObj, long dst_nativeObj, boolean aTa, long delta_nativeObj, double scale);
    private static native void mulTransposed_2(long src_nativeObj, long dst_nativeObj, boolean aTa);

    // C++: void multiply(Mat src1, Mat src2, Mat& dst, double scale = 1, int dtype = -1)
    private static native void multiply_0(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj, double scale, int dtype);
    private static native void multiply_1(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj, double scale);
    private static native void multiply_2(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj);

    // C++: void multiply(Mat src1, Scalar src2, Mat& dst, double scale = 1, int dtype = -1)
    private static native void multiply_3(long src1_nativeObj, double src2_val0, double src2_val1, double src2_val2, double src2_val3, long dst_nativeObj, double scale, int dtype);
    private static native void multiply_4(long src1_nativeObj, double src2_val0, double src2_val1, double src2_val2, double src2_val3, long dst_nativeObj, double scale);
    private static native void multiply_5(long src1_nativeObj, double src2_val0, double src2_val1, double src2_val2, double src2_val3, long dst_nativeObj);

    // C++: double norm(Mat src1, int normType = NORM_L2, Mat mask = Mat())
    private static native double norm_0(long src1_nativeObj, int normType, long mask_nativeObj);
    private static native double norm_1(long src1_nativeObj, int normType);
    private static native double norm_2(long src1_nativeObj);

    // C++: double norm(Mat src1, Mat src2, int normType = NORM_L2, Mat mask = Mat())
    private static native double norm_3(long src1_nativeObj, long src2_nativeObj, int normType, long mask_nativeObj);
    private static native double norm_4(long src1_nativeObj, long src2_nativeObj, int normType);
    private static native double norm_5(long src1_nativeObj, long src2_nativeObj);

    // C++: void normalize(Mat src, Mat& dst, double alpha = 1, double beta = 0, int norm_type = NORM_L2, int dtype = -1, Mat mask = Mat())
    private static native void normalize_0(long src_nativeObj, long dst_nativeObj, double alpha, double beta, int norm_type, int dtype, long mask_nativeObj);
    private static native void normalize_1(long src_nativeObj, long dst_nativeObj, double alpha, double beta, int norm_type, int dtype);
    private static native void normalize_2(long src_nativeObj, long dst_nativeObj, double alpha, double beta, int norm_type);
    private static native void normalize_3(long src_nativeObj, long dst_nativeObj);

    // C++: void patchNaNs(Mat& a, double val = 0)
    private static native void patchNaNs_0(long a_nativeObj, double val);
    private static native void patchNaNs_1(long a_nativeObj);

    // C++: void perspectiveTransform(Mat src, Mat& dst, Mat m)
    private static native void perspectiveTransform_0(long src_nativeObj, long dst_nativeObj, long m_nativeObj);

    // C++: void phase(Mat x, Mat y, Mat& angle, bool angleInDegrees = false)
    private static native void phase_0(long x_nativeObj, long y_nativeObj, long angle_nativeObj, boolean angleInDegrees);
    private static native void phase_1(long x_nativeObj, long y_nativeObj, long angle_nativeObj);

    // C++: void polarToCart(Mat magnitude, Mat angle, Mat& x, Mat& y, bool angleInDegrees = false)
    private static native void polarToCart_0(long magnitude_nativeObj, long angle_nativeObj, long x_nativeObj, long y_nativeObj, boolean angleInDegrees);
    private static native void polarToCart_1(long magnitude_nativeObj, long angle_nativeObj, long x_nativeObj, long y_nativeObj);

    // C++: void polylines(Mat& img, vector_vector_Point pts, bool isClosed, Scalar color, int thickness = 1, int lineType = 8, int shift = 0)
    private static native void polylines_0(long img_nativeObj, long pts_mat_nativeObj, boolean isClosed, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType, int shift);
    private static native void polylines_1(long img_nativeObj, long pts_mat_nativeObj, boolean isClosed, double color_val0, double color_val1, double color_val2, double color_val3, int thickness);
    private static native void polylines_2(long img_nativeObj, long pts_mat_nativeObj, boolean isClosed, double color_val0, double color_val1, double color_val2, double color_val3);

    // C++: void pow(Mat src, double power, Mat& dst)
    private static native void pow_0(long src_nativeObj, double power, long dst_nativeObj);

    // C++: void putText(Mat img, string text, Point org, int fontFace, double fontScale, Scalar color, int thickness = 1, int lineType = 8, bool bottomLeftOrigin = false)
    private static native void putText_0(long img_nativeObj, String text, double org_x, double org_y, int fontFace, double fontScale, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType, boolean bottomLeftOrigin);
    private static native void putText_1(long img_nativeObj, String text, double org_x, double org_y, int fontFace, double fontScale, double color_val0, double color_val1, double color_val2, double color_val3, int thickness);
    private static native void putText_2(long img_nativeObj, String text, double org_x, double org_y, int fontFace, double fontScale, double color_val0, double color_val1, double color_val2, double color_val3);

    // C++: void randShuffle_(Mat& dst, double iterFactor = 1.)
    private static native void randShuffle_0(long dst_nativeObj, double iterFactor);
    private static native void randShuffle_1(long dst_nativeObj);

    // C++: void randn(Mat& dst, double mean, double stddev)
    private static native void randn_0(long dst_nativeObj, double mean, double stddev);

    // C++: void randu(Mat& dst, double low, double high)
    private static native void randu_0(long dst_nativeObj, double low, double high);

    // C++: void rectangle(Mat& img, Point pt1, Point pt2, Scalar color, int thickness = 1, int lineType = 8, int shift = 0)
    private static native void rectangle_0(long img_nativeObj, double pt1_x, double pt1_y, double pt2_x, double pt2_y, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType, int shift);
    private static native void rectangle_1(long img_nativeObj, double pt1_x, double pt1_y, double pt2_x, double pt2_y, double color_val0, double color_val1, double color_val2, double color_val3, int thickness);
    private static native void rectangle_2(long img_nativeObj, double pt1_x, double pt1_y, double pt2_x, double pt2_y, double color_val0, double color_val1, double color_val2, double color_val3);

    // C++: void reduce(Mat src, Mat& dst, int dim, int rtype, int dtype = -1)
    private static native void reduce_0(long src_nativeObj, long dst_nativeObj, int dim, int rtype, int dtype);
    private static native void reduce_1(long src_nativeObj, long dst_nativeObj, int dim, int rtype);

    // C++: void repeat(Mat src, int ny, int nx, Mat& dst)
    private static native void repeat_0(long src_nativeObj, int ny, int nx, long dst_nativeObj);

    // C++: void scaleAdd(Mat src1, double alpha, Mat src2, Mat& dst)
    private static native void scaleAdd_0(long src1_nativeObj, double alpha, long src2_nativeObj, long dst_nativeObj);

    // C++: void setErrorVerbosity(bool verbose)
    private static native void setErrorVerbosity_0(boolean verbose);

    // C++: void setIdentity(Mat& mtx, Scalar s = Scalar(1))
    private static native void setIdentity_0(long mtx_nativeObj, double s_val0, double s_val1, double s_val2, double s_val3);
    private static native void setIdentity_1(long mtx_nativeObj);

    // C++: bool solve(Mat src1, Mat src2, Mat& dst, int flags = DECOMP_LU)
    private static native boolean solve_0(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj, int flags);
    private static native boolean solve_1(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj);

    // C++: int solveCubic(Mat coeffs, Mat& roots)
    private static native int solveCubic_0(long coeffs_nativeObj, long roots_nativeObj);

    // C++: double solvePoly(Mat coeffs, Mat& roots, int maxIters = 300)
    private static native double solvePoly_0(long coeffs_nativeObj, long roots_nativeObj, int maxIters);
    private static native double solvePoly_1(long coeffs_nativeObj, long roots_nativeObj);

    // C++: void sort(Mat src, Mat& dst, int flags)
    private static native void sort_0(long src_nativeObj, long dst_nativeObj, int flags);

    // C++: void sortIdx(Mat src, Mat& dst, int flags)
    private static native void sortIdx_0(long src_nativeObj, long dst_nativeObj, int flags);

    // C++: void split(Mat m, vector_Mat& mv)
    private static native void split_0(long m_nativeObj, long mv_mat_nativeObj);

    // C++: void sqrt(Mat src, Mat& dst)
    private static native void sqrt_0(long src_nativeObj, long dst_nativeObj);

    // C++: void subtract(Mat src1, Mat src2, Mat& dst, Mat mask = Mat(), int dtype = -1)
    private static native void subtract_0(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj, long mask_nativeObj, int dtype);
    private static native void subtract_1(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj, long mask_nativeObj);
    private static native void subtract_2(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj);

    // C++: void subtract(Mat src1, Scalar src2, Mat& dst, Mat mask = Mat(), int dtype = -1)
    private static native void subtract_3(long src1_nativeObj, double src2_val0, double src2_val1, double src2_val2, double src2_val3, long dst_nativeObj, long mask_nativeObj, int dtype);
    private static native void subtract_4(long src1_nativeObj, double src2_val0, double src2_val1, double src2_val2, double src2_val3, long dst_nativeObj, long mask_nativeObj);
    private static native void subtract_5(long src1_nativeObj, double src2_val0, double src2_val1, double src2_val2, double src2_val3, long dst_nativeObj);

    // C++: Scalar sum(Mat src)
    // NOTE: the native symbol is sumElems_0 — "sum" is renamed to avoid clashing with other overloads.
    private static native double[] sumElems_0(long src_nativeObj);

    // C++: Scalar trace(Mat mtx)
    private static native double[] trace_0(long mtx_nativeObj);

    // C++: void transform(Mat src, Mat& dst, Mat m)
    private static native void transform_0(long src_nativeObj, long dst_nativeObj, long m_nativeObj);

    // C++: void transpose(Mat src, Mat& dst)
    private static native void transpose_0(long src_nativeObj, long dst_nativeObj);

    // C++: void vconcat(vector_Mat src, Mat& dst)
    private static native void vconcat_0(long src_mat_nativeObj, long dst_nativeObj);

    // Hand-written helpers (no single C++ counterpart): minMaxLoc packed into a
    // double[] result, and getTextSize with the baseline returned via int[] out-param.
    private static native double[] n_minMaxLocManual(long src_nativeObj, long mask_nativeObj);
    private static native double[] n_getTextSize(String text, int fontFace, double fontScale, int thickness, int[] baseLine);
}